RISCVFrameLowering.cpp
1//===-- RISCVFrameLowering.cpp - RISC-V Frame Information -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of TargetFrameLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVFrameLowering.h"
15#include "RISCVSubtarget.h"
24#include "llvm/MC/MCDwarf.h"
25#include "llvm/Support/LEB128.h"
26
27#include <algorithm>
28
29using namespace llvm;
30
31namespace {
32
33class CFISaveRegisterEmitter {
34 MachineFunction &MF;
35 MachineFrameInfo &MFI;
36
37public:
38 CFISaveRegisterEmitter(MachineFunction &MF)
39 : MF{MF}, MFI{MF.getFrameInfo()} {};
40
41 void emit(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
42 const RISCVRegisterInfo &RI, const RISCVInstrInfo &TII,
43 const DebugLoc &DL, const CalleeSavedInfo &CS) const {
44 int FrameIdx = CS.getFrameIdx();
45 int64_t Offset = MFI.getObjectOffset(FrameIdx);
46 Register Reg = CS.getReg();
47 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
48 nullptr, RI.getDwarfRegNum(Reg, true), Offset));
49 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
50 .addCFIIndex(CFIIndex)
51 .setMIFlag(MachineInstr::FrameSetup);
52 }
53};
54
55class CFIRestoreRegisterEmitter {
56 MachineFunction &MF;
57
58public:
59 CFIRestoreRegisterEmitter(MachineFunction &MF) : MF{MF} {};
60
61 void emit(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
62 const RISCVRegisterInfo &RI, const RISCVInstrInfo &TII,
63 const DebugLoc &DL, const CalleeSavedInfo &CS) const {
64 Register Reg = CS.getReg();
65 unsigned CFIIndex = MF.addFrameInst(
66 MCCFIInstruction::createRestore(nullptr, RI.getDwarfRegNum(Reg, true)));
67 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
68 .addCFIIndex(CFIIndex)
69 .setMIFlag(MachineInstr::FrameDestroy);
70 }
71};
72
73} // namespace
74
75template <typename Emitter>
76void RISCVFrameLowering::emitCFIForCSI(
77 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
78 const SmallVector<CalleeSavedInfo, 8> &CSI) const {
79 MachineFunction *MF = MBB.getParent();
80 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
81 const RISCVInstrInfo *TII = STI.getInstrInfo();
82 DebugLoc DL = MBB.findDebugLoc(MBBI);
83
84 Emitter E{*MF};
85 for (const auto &CS : CSI)
86 E.emit(MBB, MBBI, *RI, *TII, DL, CS);
87}
88
89static Align getABIStackAlignment(RISCVABI::ABI ABI) {
90 if (ABI == RISCVABI::ABI_ILP32E)
91 return Align(4);
92 if (ABI == RISCVABI::ABI_LP64E)
93 return Align(8);
94 return Align(16);
95}
96
97RISCVFrameLowering::RISCVFrameLowering(const RISCVSubtarget &STI)
98 : TargetFrameLowering(
99 StackGrowsDown, getABIStackAlignment(STI.getTargetABI()),
100 /*LocalAreaOffset=*/0,
101 /*TransientStackAlignment=*/getABIStackAlignment(STI.getTargetABI())),
102 STI(STI) {}
103
104// The register used to hold the frame pointer.
105static constexpr Register FPReg = RISCV::X8;
106
107// The register used to hold the stack pointer.
108static constexpr Register SPReg = RISCV::X2;
109
110// The register used to hold the return address.
111static constexpr Register RAReg = RISCV::X1;
112
113// Offsets which need to be scaled by XLen representing locations of CSRs which
114// are given a fixed location by save/restore libcalls or Zcmp Push/Pop.
115static const std::pair<MCPhysReg, int8_t> FixedCSRFIMap[] = {
116 {/*ra*/ RAReg, -1}, {/*s0*/ FPReg, -2},
117 {/*s1*/ RISCV::X9, -3}, {/*s2*/ RISCV::X18, -4},
118 {/*s3*/ RISCV::X19, -5}, {/*s4*/ RISCV::X20, -6},
119 {/*s5*/ RISCV::X21, -7}, {/*s6*/ RISCV::X22, -8},
120 {/*s7*/ RISCV::X23, -9}, {/*s8*/ RISCV::X24, -10},
121 {/*s9*/ RISCV::X25, -11}, {/*s10*/ RISCV::X26, -12},
122 {/*s11*/ RISCV::X27, -13}};
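// Editor's note (illustrative, not part of the original source): the second
// field of each entry is a slot index, not a byte offset.
// assignCalleeSavedSpillSlots below multiplies it by the spill size
// (XLen / 8 bytes), so with save/restore libcalls on RV64 ra is placed at
// offset -8, s0 at -16, s1 at -24, and so on; on RV32 the same entries give
// -4, -8, -12, ...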
123
124// For now we use x3, a.k.a. gp, as the pointer to the shadow call stack.
125// Users should not use x3 in their asm.
126static void emitSCSPrologue(MachineFunction &MF, MachineBasicBlock &MBB,
127 MachineBasicBlock::iterator MI,
128 const DebugLoc &DL) {
129 const auto &STI = MF.getSubtarget<RISCVSubtarget>();
130 bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
131 STI.hasStdExtZicfiss();
132 bool HasSWShadowStack =
133 MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
134 if (!HasHWShadowStack && !HasSWShadowStack)
135 return;
136
137 const llvm::RISCVRegisterInfo *TRI = STI.getRegisterInfo();
138 Register RAReg = TRI->getRARegister();
139
140 // Do not save RA to the SCS if it's not saved to the regular stack,
141 // i.e. RA is not at risk of being overwritten.
142 std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
143 if (llvm::none_of(
144 CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
145 return;
146
147 const RISCVInstrInfo *TII = STI.getInstrInfo();
148 if (HasHWShadowStack) {
149 BuildMI(MBB, MI, DL, TII->get(RISCV::SSPUSH)).addReg(RAReg);
150 return;
151 }
152
153 Register SCSPReg = RISCVABI::getSCSPReg();
154
155 bool IsRV64 = STI.is64Bit();
156 int64_t SlotSize = STI.getXLen() / 8;
157 // Store return address to shadow call stack
158 // addi gp, gp, [4|8]
159 // s[w|d] ra, -[4|8](gp)
160 BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
161 .addReg(SCSPReg, RegState::Define)
162 .addReg(SCSPReg)
163 .addImm(SlotSize)
164 .setMIFlag(MachineInstr::FrameSetup);
165 BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
166 .addReg(RAReg)
167 .addReg(SCSPReg)
168 .addImm(-SlotSize)
169 .setMIFlag(MachineInstr::FrameSetup);
170
171 // Emit a CFI instruction that causes SlotSize to be subtracted from the value
172 // of the shadow stack pointer when unwinding past this frame.
173 char DwarfSCSReg = TRI->getDwarfRegNum(SCSPReg, /*IsEH*/ true);
174 assert(DwarfSCSReg < 32 && "SCS Register should be < 32 (X3).");
175
176 char Offset = static_cast<char>(-SlotSize) & 0x7f;
177 const char CFIInst[] = {
178 dwarf::DW_CFA_val_expression,
179 DwarfSCSReg, // register
180 2, // length
181 static_cast<char>(unsigned(dwarf::DW_OP_breg0 + DwarfSCSReg)),
182 Offset, // addend (sleb128)
183 };
184
185 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
186 nullptr, StringRef(CFIInst, sizeof(CFIInst))));
187 BuildMI(MBB, MI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
188 .addCFIIndex(CFIIndex)
189 .setMIFlag(MachineInstr::FrameSetup);
190}
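// Editor's note (illustrative, not part of the original source): on RV64 the
// escape above encodes "DW_CFA_val_expression gp, { DW_OP_breg3, -8 }", i.e.
// when unwinding past this frame the shadow-call-stack pointer is the current
// gp minus the 8-byte slot; on RV32 the SLEB128 addend is -4 instead.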
191
192static void emitSCSEpilogue(MachineFunction &MF, MachineBasicBlock &MBB,
193 MachineBasicBlock::iterator MI,
194 const DebugLoc &DL) {
195 const auto &STI = MF.getSubtarget<RISCVSubtarget>();
196 bool HasHWShadowStack = MF.getFunction().hasFnAttribute("hw-shadow-stack") &&
197 STI.hasStdExtZicfiss();
198 bool HasSWShadowStack =
199 MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack);
200 if (!HasHWShadowStack && !HasSWShadowStack)
201 return;
202
203 Register RAReg = STI.getRegisterInfo()->getRARegister();
204
205 // See emitSCSPrologue() above.
206 std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
207 if (llvm::none_of(
208 CSI, [&](CalleeSavedInfo &CSR) { return CSR.getReg() == RAReg; }))
209 return;
210
211 const RISCVInstrInfo *TII = STI.getInstrInfo();
212 if (HasHWShadowStack) {
213 BuildMI(MBB, MI, DL, TII->get(RISCV::SSPOPCHK)).addReg(RAReg);
214 return;
215 }
216
217 Register SCSPReg = RISCVABI::getSCSPReg();
218
219 bool IsRV64 = STI.is64Bit();
220 int64_t SlotSize = STI.getXLen() / 8;
221 // Load return address from shadow call stack
222 // l[w|d] ra, -[4|8](gp)
223 // addi gp, gp, -[4|8]
224 BuildMI(MBB, MI, DL, TII->get(IsRV64 ? RISCV::LD : RISCV::LW))
225 .addReg(RAReg, RegState::Define)
226 .addReg(SCSPReg)
227 .addImm(-SlotSize)
228 .setMIFlag(MachineInstr::FrameDestroy);
229 BuildMI(MBB, MI, DL, TII->get(RISCV::ADDI))
230 .addReg(SCSPReg, RegState::Define)
231 .addReg(SCSPReg)
232 .addImm(-SlotSize)
233 .setMIFlag(MachineInstr::FrameDestroy);
234 // Restore the SCS pointer
235 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createRestore(
236 nullptr, STI.getRegisterInfo()->getDwarfRegNum(SCSPReg, /*IsEH*/ true)));
237 BuildMI(MBB, MI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
238 .addCFIIndex(CFIIndex)
239 .setMIFlag(MachineInstr::FrameDestroy);
240}
241
242// Get the ID of the libcall used for spilling and restoring callee saved
243// registers. The ID is representative of the number of registers saved or
244// restored by the libcall, except it is zero-indexed - ID 0 corresponds to a
245// single register.
246static int getLibCallID(const MachineFunction &MF,
247 const std::vector<CalleeSavedInfo> &CSI) {
248 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
249
250 if (CSI.empty() || !RVFI->useSaveRestoreLibCalls(MF))
251 return -1;
252
253 Register MaxReg = RISCV::NoRegister;
254 for (auto &CS : CSI)
255 // assignCalleeSavedSpillSlots assigns negative frame indexes to
256 // registers which can be saved by libcall.
257 if (CS.getFrameIdx() < 0)
258 MaxReg = std::max(MaxReg.id(), CS.getReg().id());
259
260 if (MaxReg == RISCV::NoRegister)
261 return -1;
262
263 switch (MaxReg) {
264 default:
265 llvm_unreachable("Something has gone wrong!");
266 // clang-format off
267 case /*s11*/ RISCV::X27: return 12;
268 case /*s10*/ RISCV::X26: return 11;
269 case /*s9*/ RISCV::X25: return 10;
270 case /*s8*/ RISCV::X24: return 9;
271 case /*s7*/ RISCV::X23: return 8;
272 case /*s6*/ RISCV::X22: return 7;
273 case /*s5*/ RISCV::X21: return 6;
274 case /*s4*/ RISCV::X20: return 5;
275 case /*s3*/ RISCV::X19: return 4;
276 case /*s2*/ RISCV::X18: return 3;
277 case /*s1*/ RISCV::X9: return 2;
278 case /*s0*/ FPReg: return 1;
279 case /*ra*/ RAReg: return 0;
280 // clang-format on
281 }
282}
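// Editor's note (illustrative, not part of the original source): if the
// highest callee-saved register assigned a negative frame index is s3, the
// switch above yields ID 4, which selects "__riscv_save_4" /
// "__riscv_restore_4" from the tables below; a function that only spills ra
// gets ID 0 and uses "__riscv_save_0" / "__riscv_restore_0".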
283
284// Get the name of the libcall used for spilling callee saved registers.
285// If this function will not use save/restore libcalls, then return nullptr.
286static const char *
287getSpillLibCallName(const MachineFunction &MF,
288 const std::vector<CalleeSavedInfo> &CSI) {
289 static const char *const SpillLibCalls[] = {
290 "__riscv_save_0",
291 "__riscv_save_1",
292 "__riscv_save_2",
293 "__riscv_save_3",
294 "__riscv_save_4",
295 "__riscv_save_5",
296 "__riscv_save_6",
297 "__riscv_save_7",
298 "__riscv_save_8",
299 "__riscv_save_9",
300 "__riscv_save_10",
301 "__riscv_save_11",
302 "__riscv_save_12"
303 };
304
305 int LibCallID = getLibCallID(MF, CSI);
306 if (LibCallID == -1)
307 return nullptr;
308 return SpillLibCalls[LibCallID];
309}
310
311// Get the name of the libcall used for restoring callee saved registers.
312// If this function will not use save/restore libcalls, then return nullptr.
313static const char *
314getRestoreLibCallName(const MachineFunction &MF,
315 const std::vector<CalleeSavedInfo> &CSI) {
316 static const char *const RestoreLibCalls[] = {
317 "__riscv_restore_0",
318 "__riscv_restore_1",
319 "__riscv_restore_2",
320 "__riscv_restore_3",
321 "__riscv_restore_4",
322 "__riscv_restore_5",
323 "__riscv_restore_6",
324 "__riscv_restore_7",
325 "__riscv_restore_8",
326 "__riscv_restore_9",
327 "__riscv_restore_10",
328 "__riscv_restore_11",
329 "__riscv_restore_12"
330 };
331
332 int LibCallID = getLibCallID(MF, CSI);
333 if (LibCallID == -1)
334 return nullptr;
335 return RestoreLibCalls[LibCallID];
336}
337
338// Return encoded value and register count for PUSH/POP instruction,
339// representing registers to store/load.
340static std::pair<unsigned, unsigned>
341getPushPopEncodingAndNum(const Register MaxReg) {
342 switch (MaxReg) {
343 default:
344 llvm_unreachable("Unexpected Reg for Push/Pop Inst");
345 case RISCV::X27: /*s11*/
346 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S11, 13);
347 case RISCV::X25: /*s9*/
348 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S9, 11);
349 case RISCV::X24: /*s8*/
350 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S8, 10);
351 case RISCV::X23: /*s7*/
352 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S7, 9);
353 case RISCV::X22: /*s6*/
354 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S6, 8);
355 case RISCV::X21: /*s5*/
356 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S5, 7);
357 case RISCV::X20: /*s4*/
358 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S4, 6);
359 case RISCV::X19: /*s3*/
360 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S3, 5);
361 case RISCV::X18: /*s2*/
362 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S2, 4);
363 case RISCV::X9: /*s1*/
364 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0_S1, 3);
365 case FPReg: /*s0*/
366 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA_S0, 2);
367 case RAReg: /*ra*/
368 return std::make_pair(llvm::RISCVZC::RLISTENCODE::RA, 1);
369 }
370}
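// Editor's note (illustrative, not part of the original source): if the
// highest register to push is s1 (X9), this returns {RA_S0_S1, 3}, i.e. the
// Zcmp register list {ra, s0-s1} and a count of three GPRs; the push/pop
// lowering then derives the stack adjustment from that count (aligned to 16
// bytes in assignCalleeSavedSpillSlots).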
371
372// Get the max reg of Push/Pop for restoring callee saved registers.
373static Register getMaxPushPopReg(const MachineFunction &MF,
374 const std::vector<CalleeSavedInfo> &CSI) {
375 Register MaxPushPopReg = RISCV::NoRegister;
376 for (auto &CS : CSI) {
377 if (llvm::find_if(FixedCSRFIMap, [&](auto P) {
378 return P.first == CS.getReg();
379 }) != std::end(FixedCSRFIMap))
380 MaxPushPopReg = std::max(MaxPushPopReg.id(), CS.getReg().id());
381 }
382 assert(MaxPushPopReg != RISCV::X26 && "x26 requires x27 to also be pushed");
383 return MaxPushPopReg;
384}
385
386// Return true if the specified function should have a dedicated frame
387// pointer register. This is true if frame pointer elimination is
388// disabled, if it needs dynamic stack realignment, if the function has
389// variable sized allocas, or if the frame address is taken.
390bool RISCVFrameLowering::hasFP(const MachineFunction &MF) const {
391 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
392
393 const MachineFrameInfo &MFI = MF.getFrameInfo();
394 return MF.getTarget().Options.DisableFramePointerElim(MF) ||
395 RegInfo->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
396 MFI.isFrameAddressTaken();
397}
398
399bool RISCVFrameLowering::hasBP(const MachineFunction &MF) const {
400 const MachineFrameInfo &MFI = MF.getFrameInfo();
401 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
402
403 // If we do not reserve stack space for outgoing arguments in the prologue,
404 // we will adjust the stack pointer before the call instruction. After the
405 // adjustment, we cannot use SP to access the stack objects for the
406 // arguments. Instead, use BP to access these stack objects.
407 return (MFI.hasVarSizedObjects() ||
408 (!hasReservedCallFrame(MF) && (!MFI.isMaxCallFrameSizeComputed() ||
409 MFI.getMaxCallFrameSize() != 0))) &&
410 TRI->hasStackRealignment(MF);
411}
412
413// Determines the size of the frame and maximum call frame size.
414void RISCVFrameLowering::determineFrameLayout(MachineFunction &MF) const {
415 MachineFrameInfo &MFI = MF.getFrameInfo();
416 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
417
418 // Get the number of bytes to allocate from the FrameInfo.
419 uint64_t FrameSize = MFI.getStackSize();
420
421 // Get the alignment.
422 Align StackAlign = getStackAlign();
423
424 // Make sure the frame is aligned.
425 FrameSize = alignTo(FrameSize, StackAlign);
426
427 // Update frame info.
428 MFI.setStackSize(FrameSize);
429
430 // When using SP or BP to access stack objects, we may require extra padding
431 // to ensure the bottom of the RVV stack is correctly aligned within the main
432 // stack. We calculate this as the amount required to align the scalar local
433 // variable section up to the RVV alignment.
434 const TargetRegisterInfo *TRI = STI.getRegisterInfo();
435 if (RVFI->getRVVStackSize() && (!hasFP(MF) || TRI->hasStackRealignment(MF))) {
436 int ScalarLocalVarSize = FrameSize - RVFI->getCalleeSavedStackSize() -
437 RVFI->getVarArgsSaveSize();
438 if (auto RVVPadding =
439 offsetToAlignment(ScalarLocalVarSize, RVFI->getRVVStackAlign()))
440 RVFI->setRVVPadding(RVVPadding);
441 }
442}
443
444// Returns the stack size including RVV padding (when required), rounded back
445// up to the required stack alignment.
446uint64_t RISCVFrameLowering::getStackSizeWithRVVPadding(
447 const MachineFunction &MF) const {
448 const MachineFrameInfo &MFI = MF.getFrameInfo();
449 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
450 return alignTo(MFI.getStackSize() + RVFI->getRVVPadding(), getStackAlign());
451}
452
453static SmallVector<CalleeSavedInfo, 8>
454getUnmanagedCSI(const MachineFunction &MF,
455 const std::vector<CalleeSavedInfo> &CSI) {
456 const MachineFrameInfo &MFI = MF.getFrameInfo();
457 SmallVector<CalleeSavedInfo, 8> NonLibcallCSI;
458
459 for (auto &CS : CSI) {
460 int FI = CS.getFrameIdx();
461 if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::Default)
462 NonLibcallCSI.push_back(CS);
463 }
464
465 return NonLibcallCSI;
466}
467
468static SmallVector<CalleeSavedInfo, 8>
469getRVVCalleeSavedInfo(const MachineFunction &MF,
470 const std::vector<CalleeSavedInfo> &CSI) {
471 const MachineFrameInfo &MFI = MF.getFrameInfo();
472 SmallVector<CalleeSavedInfo, 8> RVVCSI;
473
474 for (auto &CS : CSI) {
475 int FI = CS.getFrameIdx();
476 if (FI >= 0 && MFI.getStackID(FI) == TargetStackID::ScalableVector)
477 RVVCSI.push_back(CS);
478 }
479
480 return RVVCSI;
481}
482
483static SmallVector<CalleeSavedInfo, 8>
484getPushOrLibCallsSavedInfo(const MachineFunction &MF,
485 const std::vector<CalleeSavedInfo> &CSI) {
486 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
487
488 SmallVector<CalleeSavedInfo, 8> PushOrLibCallsCSI;
489 if (!RVFI->useSaveRestoreLibCalls(MF) && !RVFI->isPushable(MF))
490 return PushOrLibCallsCSI;
491
492 for (const auto &CS : CSI) {
493 const auto *FII = llvm::find_if(
494 FixedCSRFIMap, [&](auto P) { return P.first == CS.getReg(); });
495 if (FII != std::end(FixedCSRFIMap))
496 PushOrLibCallsCSI.push_back(CS);
497 }
498
499 return PushOrLibCallsCSI;
500}
501
502static void appendScalableVectorExpression(const TargetRegisterInfo &TRI,
503 SmallVectorImpl<char> &Expr,
504 int FixedOffset, int ScalableOffset,
505 llvm::raw_string_ostream &Comment) {
506 unsigned DwarfVLenB = TRI.getDwarfRegNum(RISCV::VLENB, true);
507 uint8_t Buffer[16];
508 if (FixedOffset) {
509 Expr.push_back(dwarf::DW_OP_consts);
510 Expr.append(Buffer, Buffer + encodeSLEB128(FixedOffset, Buffer));
511 Expr.push_back((uint8_t)dwarf::DW_OP_plus);
512 Comment << (FixedOffset < 0 ? " - " : " + ") << std::abs(FixedOffset);
513 }
514
515 Expr.push_back((uint8_t)dwarf::DW_OP_consts);
516 Expr.append(Buffer, Buffer + encodeSLEB128(ScalableOffset, Buffer));
517
518 Expr.push_back((uint8_t)dwarf::DW_OP_bregx);
519 Expr.append(Buffer, Buffer + encodeULEB128(DwarfVLenB, Buffer));
520 Expr.push_back(0);
521
522 Expr.push_back((uint8_t)dwarf::DW_OP_mul);
523 Expr.push_back((uint8_t)dwarf::DW_OP_plus);
524
525 Comment << (ScalableOffset < 0 ? " - " : " + ") << std::abs(ScalableOffset)
526 << " * vlenb";
527}
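// Editor's note (illustrative, not part of the original source): for
// FixedOffset = 16 and ScalableOffset = 2 the appended bytes decode as
//   DW_OP_consts 16; DW_OP_plus; DW_OP_consts 2; DW_OP_bregx vlenb, 0;
//   DW_OP_mul; DW_OP_plus
// i.e. "<base> + 16 + 2 * vlenb", matching the comment string built alongside.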
528
529static MCCFIInstruction createDefCFAExpression(const TargetRegisterInfo &TRI,
530 Register Reg,
531 uint64_t FixedOffset,
532 uint64_t ScalableOffset) {
533 assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
534 SmallString<64> Expr;
535 std::string CommentBuffer;
536 llvm::raw_string_ostream Comment(CommentBuffer);
537 // Build up the expression (Reg + FixedOffset + ScalableOffset * VLENB).
538 unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
539 Expr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
540 Expr.push_back(0);
541 if (Reg == SPReg)
542 Comment << "sp";
543 else
544 Comment << printReg(Reg, &TRI);
545
546 appendScalableVectorExpression(TRI, Expr, FixedOffset, ScalableOffset,
547 Comment);
548
549 SmallString<64> DefCfaExpr;
550 uint8_t Buffer[16];
551 DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
552 DefCfaExpr.append(Buffer, Buffer + encodeULEB128(Expr.size(), Buffer));
553 DefCfaExpr.append(Expr.str());
554
555 return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
556 Comment.str());
557}
558
559static MCCFIInstruction createDefCFAOffset(const TargetRegisterInfo &TRI,
560 Register Reg, uint64_t FixedOffset,
561 uint64_t ScalableOffset) {
562 assert(ScalableOffset != 0 && "Did not need to adjust CFA for RVV");
563 SmallString<64> Expr;
564 std::string CommentBuffer;
565 llvm::raw_string_ostream Comment(CommentBuffer);
566 Comment << printReg(Reg, &TRI) << " @ cfa";
567
568 // Build up the expression (FixedOffset + ScalableOffset * VLENB).
569 appendScalableVectorExpression(TRI, Expr, FixedOffset, ScalableOffset,
570 Comment);
571
572 SmallString<64> DefCfaExpr;
573 uint8_t Buffer[16];
574 unsigned DwarfReg = TRI.getDwarfRegNum(Reg, true);
575 DefCfaExpr.push_back(dwarf::DW_CFA_expression);
576 DefCfaExpr.append(Buffer, Buffer + encodeULEB128(DwarfReg, Buffer));
577 DefCfaExpr.append(Buffer, Buffer + encodeULEB128(Expr.size(), Buffer));
578 DefCfaExpr.append(Expr.str());
579
580 return MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str(), SMLoc(),
581 Comment.str());
582}
583
584// Allocate stack space and probe it if necessary.
585void RISCVFrameLowering::allocateStack(MachineBasicBlock &MBB,
586 MachineBasicBlock::iterator MBBI,
587 MachineFunction &MF, uint64_t Offset,
588 uint64_t RealStackSize, bool EmitCFI,
589 bool NeedProbe,
590 uint64_t ProbeSize) const {
591 DebugLoc DL;
592 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
593 const RISCVInstrInfo *TII = STI.getInstrInfo();
594
595 // Simply allocate the stack if it's not big enough to require a probe.
596 if (!NeedProbe || Offset <= ProbeSize) {
597 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Offset),
598 MachineInstr::FrameSetup, getStackAlign());
599
600 if (EmitCFI) {
601 // Emit ".cfi_def_cfa_offset RealStackSize"
602 unsigned CFIIndex = MF.addFrameInst(
603 MCCFIInstruction::cfiDefCfaOffset(nullptr, RealStackSize));
604 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
605 .addCFIIndex(CFIIndex)
606 .setMIFlag(MachineInstr::FrameSetup);
607 }
608
609 return;
610 }
611
612 // Unroll the probe loop depending on the number of iterations.
613 if (Offset < ProbeSize * 5) {
614 uint64_t CurrentOffset = 0;
615 bool IsRV64 = STI.is64Bit();
616 while (CurrentOffset + ProbeSize <= Offset) {
617 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
618 StackOffset::getFixed(-ProbeSize), MachineInstr::FrameSetup,
619 getStackAlign());
620 // s[d|w] zero, 0(sp)
621 BuildMI(MBB, MBBI, DL, TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
622 .addReg(RISCV::X0)
623 .addReg(SPReg)
624 .addImm(0)
625 .setMIFlag(MachineInstr::FrameSetup);
626
627 CurrentOffset += ProbeSize;
628 if (EmitCFI) {
629 // Emit ".cfi_def_cfa_offset CurrentOffset"
630 unsigned CFIIndex = MF.addFrameInst(
631 MCCFIInstruction::cfiDefCfaOffset(nullptr, CurrentOffset));
632 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
633 .addCFIIndex(CFIIndex)
634 .setMIFlag(MachineInstr::FrameSetup);
635 }
636 }
637
638 uint64_t Residual = Offset - CurrentOffset;
639 if (Residual) {
640 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
641 StackOffset::getFixed(-Residual), MachineInstr::FrameSetup,
642 getStackAlign());
643 if (EmitCFI) {
644 // Emit ".cfi_def_cfa_offset Offset"
645 unsigned CFIIndex =
646 MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Offset));
647 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
648 .addCFIIndex(CFIIndex)
649 .setMIFlag(MachineInstr::FrameSetup);
650 }
651 }
652
653 return;
654 }
655
656 // Emit a variable-length allocation probing loop.
657 uint64_t RoundedSize = alignDown(Offset, ProbeSize);
658 uint64_t Residual = Offset - RoundedSize;
659
660 Register TargetReg = RISCV::X6;
661 // SUB TargetReg, SP, RoundedSize
662 RI->adjustReg(MBB, MBBI, DL, TargetReg, SPReg,
663 StackOffset::getFixed(-RoundedSize), MachineInstr::FrameSetup,
664 getStackAlign());
665
666 if (EmitCFI) {
667 // Set the CFA register to TargetReg.
668 unsigned Reg = STI.getRegisterInfo()->getDwarfRegNum(TargetReg, true);
669 unsigned CFIIndex =
670 MF.addFrameInst(MCCFIInstruction::cfiDefCfa(nullptr, Reg, RoundedSize));
671 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
672 .addCFIIndex(CFIIndex)
673 .setMIFlag(MachineInstr::FrameSetup);
674 }
675
676 // It will be expanded to a probe loop in `inlineStackProbe`.
677 BuildMI(MBB, MBBI, DL, TII->get(RISCV::PROBED_STACKALLOC))
678 .addReg(SPReg)
679 .addReg(TargetReg);
680
681 if (EmitCFI) {
682 // Set the CFA register back to SP.
683 unsigned Reg = STI.getRegisterInfo()->getDwarfRegNum(SPReg, true);
684 unsigned CFIIndex =
686 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
687 .addCFIIndex(CFIIndex)
688 .setMIFlag(MachineInstr::FrameSetup);
689 }
690
691 if (Residual)
692 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(-Residual),
693 MachineInstr::FrameSetup, getStackAlign());
694
695 if (EmitCFI) {
696 // Emit ".cfi_def_cfa_offset Offset"
697 unsigned CFIIndex =
698 MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, Offset));
699 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
700 .addCFIIndex(CFIIndex)
701 .setMIFlag(MachineInstr::FrameSetup);
702 }
703}
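// Editor's note (illustrative, not part of the original source): assuming the
// common 4 KiB probe size, a 10 KiB allocation takes the unrolled path above
// (two probed 4 KiB steps plus a 2 KiB residual), while a 64 KiB allocation
// is at least ProbeSize * 5 and is emitted as a PROBED_STACKALLOC loop with
// no residual.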
704
705void RISCVFrameLowering::emitPrologue(MachineFunction &MF,
706 MachineBasicBlock &MBB) const {
707 MachineFrameInfo &MFI = MF.getFrameInfo();
708 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
709 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
710 const RISCVInstrInfo *TII = STI.getInstrInfo();
711 MachineBasicBlock::iterator MBBI = MBB.begin();
712
713 Register BPReg = RISCVABI::getBPReg();
714
715 // Debug location must be unknown since the first debug location is used
716 // to determine the end of the prologue.
717 DebugLoc DL;
718
719 // All calls are tail calls in GHC calling conv, and functions have no
720 // prologue/epilogue.
721 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
722 return;
723
724 // Emit prologue for shadow call stack.
725 emitSCSPrologue(MF, MBB, MBBI, DL);
726
727 auto FirstFrameSetup = MBBI;
728
729 // Since spillCalleeSavedRegisters may have inserted a libcall, skip past
730 // any instructions marked as FrameSetup
731 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
732 ++MBBI;
733
734 // Determine the correct frame layout
735 determineFrameLayout(MF);
736
737 const auto &CSI = MFI.getCalleeSavedInfo();
738
739 // If libcalls are used to spill and restore callee-saved registers, the frame
741 // has two sections: the opaque section managed by the libcalls, and the
741 // section managed by MachineFrameInfo which can also hold callee saved
742 // registers in fixed stack slots, both of which have negative frame indices.
743 // This gets even more complicated when incoming arguments are passed via the
744 // stack, as these too have negative frame indices. An example is detailed
745 // below:
746 //
747 // | incoming arg | <- FI[-3]
748 // | libcallspill |
749 // | calleespill | <- FI[-2]
750 // | calleespill | <- FI[-1]
751 // | this_frame | <- FI[0]
752 //
753 // For negative frame indices, the offset from the frame pointer will differ
754 // depending on which of these groups the frame index applies to.
755 // The following calculates the correct offset knowing the number of callee
756 // saved registers spilt by the two methods.
757 if (int LibCallRegs = getLibCallID(MF, MFI.getCalleeSavedInfo()) + 1) {
758 // Calculate the size of the frame managed by the libcall. The stack
759 // alignment of these libcalls should be the same as how we set it in
760 // getABIStackAlignment.
761 unsigned LibCallFrameSize =
762 alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
763 RVFI->setLibCallStackSize(LibCallFrameSize);
764
765 unsigned CFIIndex = MF.addFrameInst(
766 MCCFIInstruction::cfiDefCfaOffset(nullptr, LibCallFrameSize));
767 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
768 .addCFIIndex(CFIIndex)
769 .setMIFlag(MachineInstr::FrameSetup);
770
771 emitCFIForCSI<CFISaveRegisterEmitter>(MBB, MBBI,
772 getPushOrLibCallsSavedInfo(MF, CSI));
773 }
774
775 // FIXME (note copied from Lanai): This appears to be overallocating. Needs
776 // investigation. Get the number of bytes to allocate from the FrameInfo.
777 uint64_t RealStackSize = getStackSizeWithRVVPadding(MF);
778 uint64_t StackSize = RealStackSize - RVFI->getReservedSpillsSize();
779 uint64_t RVVStackSize = RVFI->getRVVStackSize();
780
781 // Early exit if there is no need to allocate on the stack
782 if (RealStackSize == 0 && !MFI.adjustsStack() && RVVStackSize == 0)
783 return;
784
785 // If the stack pointer has been marked as reserved, then produce an error if
786 // the frame requires stack allocation
787 if (STI.isRegisterReservedByUser(SPReg))
788 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
789 MF.getFunction(), "Stack pointer required, but has been reserved."});
790
791 uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
792 // Split the SP adjustment to reduce the offsets of callee saved spill.
793 if (FirstSPAdjustAmount) {
794 StackSize = FirstSPAdjustAmount;
795 RealStackSize = FirstSPAdjustAmount;
796 }
797
798 if (RVFI->isPushable(MF) && FirstFrameSetup != MBB.end() &&
799 FirstFrameSetup->getOpcode() == RISCV::CM_PUSH) {
800 // Use available stack adjustment in push instruction to allocate additional
801 // stack space. Align the stack size down to a multiple of 16. This is
802 // needed for RVE.
803 // FIXME: Can we increase the stack size to a multiple of 16 instead?
804 uint64_t Spimm =
805 std::min(alignDown(StackSize, 16), static_cast<uint64_t>(48));
806 FirstFrameSetup->getOperand(1).setImm(Spimm);
807 StackSize -= Spimm;
808
809 unsigned CFIIndex = MF.addFrameInst(
810 MCCFIInstruction::cfiDefCfaOffset(nullptr, RealStackSize - StackSize));
811 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
812 .addCFIIndex(CFIIndex)
813 .setMIFlag(MachineInstr::FrameSetup);
814
815 emitCFIForCSI<CFISaveRegisterEmitter>(MBB, MBBI,
816 getPushOrLibCallsSavedInfo(MF, CSI));
817 }
818
819 // Allocate space on the stack if necessary.
820 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
821 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
822 bool NeedProbe = TLI->hasInlineStackProbe(MF);
823 uint64_t ProbeSize = TLI->getStackProbeSize(MF, getStackAlign());
824 if (StackSize != 0)
825 allocateStack(MBB, MBBI, MF, StackSize, RealStackSize, /*EmitCFI=*/true,
826 NeedProbe, ProbeSize);
827
828 // The frame pointer is callee-saved, and code has been generated for us to
829 // save it to the stack. We need to skip over the storing of callee-saved
830 // registers as the frame pointer must be modified after it has been saved
831 // to the stack, not before.
832 // FIXME: assumes exactly one instruction is used to save each callee-saved
833 // register.
834 std::advance(MBBI, getUnmanagedCSI(MF, CSI).size());
835
836 // Iterate over list of callee-saved registers and emit .cfi_offset
837 // directives.
838 emitCFIForCSI<CFISaveRegisterEmitter>(MBB, MBBI, getUnmanagedCSI(MF, CSI));
839
840 // Generate new FP.
841 if (hasFP(MF)) {
842 if (STI.isRegisterReservedByUser(FPReg))
843 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
844 MF.getFunction(), "Frame pointer required, but has been reserved."});
845 // The frame pointer does need to be reserved from register allocation.
846 assert(MF.getRegInfo().isReserved(FPReg) && "FP not reserved");
847
848 RI->adjustReg(MBB, MBBI, DL, FPReg, SPReg,
849 StackOffset::getFixed(RealStackSize - RVFI->getVarArgsSaveSize()),
850 MachineInstr::FrameSetup, getStackAlign());
851
852 // Emit ".cfi_def_cfa $fp, RVFI->getVarArgsSaveSize()"
853 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
854 nullptr, RI->getDwarfRegNum(FPReg, true), RVFI->getVarArgsSaveSize()));
855 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
856 .addCFIIndex(CFIIndex)
857 .setMIFlag(MachineInstr::FrameSetup);
858 }
859
860 // Emit the second SP adjustment after saving callee saved registers.
861 if (FirstSPAdjustAmount) {
862 uint64_t SecondSPAdjustAmount =
863 getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
864 assert(SecondSPAdjustAmount > 0 &&
865 "SecondSPAdjustAmount should be greater than zero");
866
867 allocateStack(MBB, MBBI, MF, SecondSPAdjustAmount,
868 getStackSizeWithRVVPadding(MF), !hasFP(MF), NeedProbe,
869 ProbeSize);
870 }
871
872 if (RVVStackSize) {
873 // We must keep the stack pointer aligned through any intermediate
874 // updates.
875 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg,
876 StackOffset::getScalable(-RVVStackSize),
877 MachineInstr::FrameSetup, getStackAlign());
878
879 if (!hasFP(MF)) {
880 // Emit .cfi_def_cfa_expression "sp + StackSize + RVVStackSize * vlenb".
881 unsigned CFIIndex = MF.addFrameInst(createDefCFAExpression(
882 *RI, SPReg, getStackSizeWithRVVPadding(MF), RVVStackSize / 8));
883 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
884 .addCFIIndex(CFIIndex)
885 .setMIFlag(MachineInstr::FrameSetup);
886 }
887
888 std::advance(MBBI, getRVVCalleeSavedInfo(MF, CSI).size());
889 emitCalleeSavedRVVPrologCFI(MBB, MBBI, hasFP(MF));
890 }
891
892 if (hasFP(MF)) {
893 // Realign Stack
895 if (RI->hasStackRealignment(MF)) {
896 Align MaxAlignment = MFI.getMaxAlign();
897
899 if (isInt<12>(-(int)MaxAlignment.value())) {
900 BuildMI(MBB, MBBI, DL, TII->get(RISCV::ANDI), SPReg)
901 .addReg(SPReg)
902 .addImm(-(int)MaxAlignment.value())
903 .setMIFlag(MachineInstr::FrameSetup);
904 } else {
905 unsigned ShiftAmount = Log2(MaxAlignment);
906 Register VR =
907 MF.getRegInfo().createVirtualRegister(&RISCV::GPRRegClass);
908 BuildMI(MBB, MBBI, DL, TII->get(RISCV::SRLI), VR)
909 .addReg(SPReg)
910 .addImm(ShiftAmount)
911 .setMIFlag(MachineInstr::FrameSetup);
912 BuildMI(MBB, MBBI, DL, TII->get(RISCV::SLLI), SPReg)
913 .addReg(VR)
914 .addImm(ShiftAmount)
915 .setMIFlag(MachineInstr::FrameSetup);
916 }
917 // FP will be used to restore the frame in the epilogue, so we need
918 // another base register BP to record SP after re-alignment. SP will
919 // track the current stack after allocating variable sized objects.
920 if (hasBP(MF)) {
921 // move BP, SP
922 BuildMI(MBB, MBBI, DL, TII->get(RISCV::ADDI), BPReg)
923 .addReg(SPReg)
924 .addImm(0)
925 .setMIFlag(MachineInstr::FrameSetup);
926 }
927 }
928 }
929}
930
931void RISCVFrameLowering::deallocateStack(MachineFunction &MF,
932 MachineBasicBlock &MBB,
933 MachineBasicBlock::iterator MBBI,
934 const DebugLoc &DL,
935 uint64_t &StackSize,
936 int64_t CFAOffset) const {
937 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
938 const RISCVInstrInfo *TII = STI.getInstrInfo();
939
940 RI->adjustReg(MBB, MBBI, DL, SPReg, SPReg, StackOffset::getFixed(StackSize),
941 MachineInstr::FrameDestroy, getStackAlign());
942 StackSize = 0;
943
944 unsigned CFIIndex =
945 MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, CFAOffset));
946 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
947 .addCFIIndex(CFIIndex)
948 .setMIFlag(MachineInstr::FrameDestroy);
949}
950
951void RISCVFrameLowering::emitEpilogue(MachineFunction &MF,
952 MachineBasicBlock &MBB) const {
953 const RISCVRegisterInfo *RI = STI.getRegisterInfo();
954 MachineFrameInfo &MFI = MF.getFrameInfo();
955 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
956 const RISCVInstrInfo *TII = STI.getInstrInfo();
957
958 // All calls are tail calls in GHC calling conv, and functions have no
959 // prologue/epilogue.
960 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
961 return;
962
963 // Get the insert location for the epilogue. If there were no terminators in
964 // the block, get the last instruction.
965 MachineBasicBlock::iterator MBBI = MBB.end();
966 DebugLoc DL;
967 if (!MBB.empty()) {
968 MBBI = MBB.getLastNonDebugInstr();
969 if (MBBI != MBB.end())
970 DL = MBBI->getDebugLoc();
971
972 MBBI = MBB.getFirstTerminator();
973
974 // If callee-saved registers are saved via libcall, place stack adjustment
975 // before this call.
976 while (MBBI != MBB.begin() &&
977 std::prev(MBBI)->getFlag(MachineInstr::FrameDestroy))
978 --MBBI;
979 }
980
981 const auto &CSI = MFI.getCalleeSavedInfo();
982
983 // Skip to before the restores of scalar callee-saved registers
984 // FIXME: assumes exactly one instruction is used to restore each
985 // callee-saved register.
986 auto LastFrameDestroy = std::prev(MBBI, getUnmanagedCSI(MF, CSI).size());
987
988 uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
989 uint64_t RealStackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
990 : getStackSizeWithRVVPadding(MF);
991 uint64_t StackSize = FirstSPAdjustAmount ? FirstSPAdjustAmount
992 : getStackSizeWithRVVPadding(MF) -
993 RVFI->getReservedSpillsSize();
994 uint64_t FPOffset = RealStackSize - RVFI->getVarArgsSaveSize();
995 uint64_t RVVStackSize = RVFI->getRVVStackSize();
996
997 bool RestoreSPFromFP = RI->hasStackRealignment(MF) ||
998 MFI.hasVarSizedObjects() || !hasReservedCallFrame(MF);
999 if (RVVStackSize) {
1000 // If RestoreSPFromFP the stack pointer will be restored using the frame
1001 // pointer value.
1002 if (!RestoreSPFromFP)
1003 RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg,
1004 StackOffset::getScalable(RVVStackSize),
1005 MachineInstr::FrameDestroy, getStackAlign());
1006
1007 if (!hasFP(MF)) {
1008 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
1009 nullptr, RI->getDwarfRegNum(SPReg, true), RealStackSize));
1010 BuildMI(MBB, LastFrameDestroy, DL,
1011 TII->get(TargetOpcode::CFI_INSTRUCTION))
1012 .addCFIIndex(CFIIndex)
1013 .setMIFlag(MachineInstr::FrameDestroy);
1014 }
1015
1016 emitCalleeSavedRVVEpilogCFI(MBB, LastFrameDestroy);
1017 }
1018
1019 if (FirstSPAdjustAmount) {
1020 uint64_t SecondSPAdjustAmount =
1021 getStackSizeWithRVVPadding(MF) - FirstSPAdjustAmount;
1022 assert(SecondSPAdjustAmount > 0 &&
1023 "SecondSPAdjustAmount should be greater than zero");
1024
1025 // If RestoreSPFromFP the stack pointer will be restored using the frame
1026 // pointer value.
1027 if (!RestoreSPFromFP)
1028 RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, SPReg,
1029 StackOffset::getFixed(SecondSPAdjustAmount),
1030 MachineInstr::FrameDestroy, getStackAlign());
1031
1032 if (!hasFP(MF)) {
1033 unsigned CFIIndex = MF.addFrameInst(
1034 MCCFIInstruction::cfiDefCfaOffset(nullptr, FirstSPAdjustAmount));
1035 BuildMI(MBB, LastFrameDestroy, DL,
1036 TII->get(TargetOpcode::CFI_INSTRUCTION))
1037 .addCFIIndex(CFIIndex)
1038 .setMIFlag(MachineInstr::FrameDestroy);
1039 }
1040 }
1041
1042 // Restore the stack pointer using the value of the frame pointer. Only
1043 // necessary if the stack pointer was modified, meaning the stack size is
1044 // unknown.
1045 //
1046 // In order to make sure the stack pointer is right through the EH region,
1047 // we also need to restore the stack pointer from the frame pointer if we
1048 // don't preserve stack space within the prologue/epilogue for outgoing
1049 // variables. Normally, checking whether a variable sized object is present
1050 // would be enough, but we also don't preserve that space in the
1051 // prologue/epilogue when there are vector objects on the stack.
1052 if (RestoreSPFromFP) {
1053 assert(hasFP(MF) && "frame pointer should not have been eliminated");
1054 RI->adjustReg(MBB, LastFrameDestroy, DL, SPReg, FPReg,
1055 StackOffset::getFixed(-FPOffset), MachineInstr::FrameDestroy,
1056 getStackAlign());
1057 }
1058
1059 if (hasFP(MF)) {
1060 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::cfiDefCfa(
1061 nullptr, RI->getDwarfRegNum(SPReg, true), RealStackSize));
1062 BuildMI(MBB, LastFrameDestroy, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
1063 .addCFIIndex(CFIIndex)
1064 .setMIFlag(MachineInstr::FrameDestroy);
1065 }
1066
1067 if (getLibCallID(MF, CSI) != -1) {
1068 // The tail __riscv_restore_[0-12] instruction is considered a terminator,
1069 // therefore it is unnecessary to place any CFI instructions after it. Just
1070 // deallocate stack if needed and return.
1071 if (StackSize != 0)
1072 deallocateStack(MF, MBB, MBBI, DL, StackSize,
1073 RVFI->getLibCallStackSize());
1074
1075 // Emit epilogue for shadow call stack.
1076 emitSCSEpilogue(MF, MBB, MBBI, DL);
1077 return;
1078 }
1079
1080 // Recover callee-saved registers.
1081 emitCFIForCSI<CFIRestoreRegisterEmitter>(MBB, MBBI, getUnmanagedCSI(MF, CSI));
1082
1083 bool ApplyPop = RVFI->isPushable(MF) && MBBI != MBB.end() &&
1084 MBBI->getOpcode() == RISCV::CM_POP;
1085 if (ApplyPop) {
1086 // Use available stack adjustment in pop instruction to deallocate stack
1087 // space. Align the stack size down to a multiple of 16. This is needed for
1088 // RVE.
1089 // FIXME: Can we increase the stack size to a multiple of 16 instead?
1090 uint64_t Spimm =
1091 std::min(alignDown(StackSize, 16), static_cast<uint64_t>(48));
1092 MBBI->getOperand(1).setImm(Spimm);
1093 StackSize -= Spimm;
1094
1095 if (StackSize != 0)
1096 deallocateStack(MF, MBB, MBBI, DL, StackSize,
1097 /*stack_adj of cm.pop instr*/ RealStackSize - StackSize);
1098
1099 auto NextI = next_nodbg(MBBI, MBB.end());
1100 if (NextI == MBB.end() || NextI->getOpcode() != RISCV::PseudoRET) {
1101 ++MBBI;
1102
1103 emitCFIForCSI<CFIRestoreRegisterEmitter>(
1104 MBB, MBBI, getPushOrLibCallsSavedInfo(MF, CSI));
1105
1106 // Update CFA offset. After CM_POP SP should be equal to CFA, so CFA
1107 // offset should be zero.
1108 unsigned CFIIndex =
1109 MF.addFrameInst(MCCFIInstruction::cfiDefCfaOffset(nullptr, 0));
1110 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
1111 .addCFIIndex(CFIIndex)
1112 .setMIFlag(MachineInstr::FrameDestroy);
1113 }
1114 }
1115
1116 // Deallocate stack if StackSize isn't zero yet.
1117 if (StackSize != 0)
1118 deallocateStack(MF, MBB, MBBI, DL, StackSize, 0);
1119
1120 // Emit epilogue for shadow call stack.
1121 emitSCSEpilogue(MF, MBB, MBBI, DL);
1122}
1123
1124StackOffset
1125RISCVFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1126 Register &FrameReg) const {
1127 const MachineFrameInfo &MFI = MF.getFrameInfo();
1128 const auto *RI = MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
1129 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1130
1131 // Callee-saved registers should be referenced relative to the stack
1132 // pointer (positive offset), otherwise use the frame pointer (negative
1133 // offset).
1134 const auto &CSI = getUnmanagedCSI(MF, MFI.getCalleeSavedInfo());
1135 int MinCSFI = 0;
1136 int MaxCSFI = -1;
1137 StackOffset Offset;
1138 auto StackID = MFI.getStackID(FI);
1139
1140 assert((StackID == TargetStackID::Default ||
1141 StackID == TargetStackID::ScalableVector) &&
1142 "Unexpected stack ID for the frame object.");
1143 if (StackID == TargetStackID::Default) {
1144 assert(getOffsetOfLocalArea() == 0 && "LocalAreaOffset is not 0!");
1146 MFI.getOffsetAdjustment());
1147 } else if (StackID == TargetStackID::ScalableVector) {
1148 Offset = StackOffset::getScalable(MFI.getObjectOffset(FI));
1149 }
1150
1151 uint64_t FirstSPAdjustAmount = getFirstSPAdjustAmount(MF);
1152
1153 if (CSI.size()) {
1154 MinCSFI = CSI[0].getFrameIdx();
1155 MaxCSFI = CSI[CSI.size() - 1].getFrameIdx();
1156 }
1157
1158 if (FI >= MinCSFI && FI <= MaxCSFI) {
1159 FrameReg = SPReg;
1160
1161 if (FirstSPAdjustAmount)
1162 Offset += StackOffset::getFixed(FirstSPAdjustAmount);
1163 else
1164 Offset += StackOffset::getFixed(getStackSizeWithRVVPadding(MF));
1165 return Offset;
1166 }
1167
1168 if (RI->hasStackRealignment(MF) && !MFI.isFixedObjectIndex(FI)) {
1169 // If the stack was realigned, the frame pointer is set in order to allow
1170 // SP to be restored, so we need another base register to record the stack
1171 // after realignment.
1172 // |--------------------------| -- <-- FP
1173 // | callee-allocated save | | <----|
1174 // | area for register varargs| | |
1175 // |--------------------------| | |
1176 // | callee-saved registers | | |
1177 // |--------------------------| -- |
1178 // | realignment (the size of | | |
1179 // | this area is not counted | | |
1180 // | in MFI.getStackSize()) | | |
1181 // |--------------------------| -- |-- MFI.getStackSize()
1182 // | RVV alignment padding | | |
1183 // | (not counted in | | |
1184 // | MFI.getStackSize() but | | |
1185 // | counted in | | |
1186 // | RVFI.getRVVStackSize()) | | |
1187 // |--------------------------| -- |
1188 // | RVV objects | | |
1189 // | (not counted in | | |
1190 // | MFI.getStackSize()) | | |
1191 // |--------------------------| -- |
1192 // | padding before RVV | | |
1193 // | (not counted in | | |
1194 // | MFI.getStackSize() or in | | |
1195 // | RVFI.getRVVStackSize()) | | |
1196 // |--------------------------| -- |
1197 // | scalar local variables | | <----'
1198 // |--------------------------| -- <-- BP (if var sized objects present)
1199 // | VarSize objects | |
1200 // |--------------------------| -- <-- SP
1201 if (hasBP(MF)) {
1202 FrameReg = RISCVABI::getBPReg();
1203 } else {
1204 // VarSize objects must be empty in this case!
1205 assert(!MFI.hasVarSizedObjects());
1206 FrameReg = SPReg;
1207 }
1208 } else {
1209 FrameReg = RI->getFrameRegister(MF);
1210 }
1211
1212 if (FrameReg == FPReg) {
1213 Offset += StackOffset::getFixed(RVFI->getVarArgsSaveSize());
1214 // When using FP to access scalable vector objects, we need to subtract
1215 // the frame size.
1216 //
1217 // |--------------------------| -- <-- FP
1218 // | callee-allocated save | |
1219 // | area for register varargs| |
1220 // |--------------------------| |
1221 // | callee-saved registers | |
1222 // |--------------------------| | MFI.getStackSize()
1223 // | scalar local variables | |
1224 // |--------------------------| -- (Offset of RVV objects is from here.)
1225 // | RVV objects |
1226 // |--------------------------|
1227 // | VarSize objects |
1228 // |--------------------------| <-- SP
1229 if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
1230 assert(!RI->hasStackRealignment(MF) &&
1231 "Can't index across variable sized realign");
1232 // We don't expect any extra RVV alignment padding, as the stack size
1233 // and RVV object sections should be correctly aligned in their own
1234 // right.
1235 assert(MFI.getStackSize() == getStackSizeWithRVVPadding(MF) &&
1236 "Inconsistent stack layout");
1237 Offset -= StackOffset::getFixed(MFI.getStackSize());
1238 }
1239 return Offset;
1240 }
1241
1242 // This case handles indexing off both SP and BP.
1243 // If indexing off SP, there must not be any var sized objects
1244 assert(FrameReg == RISCVABI::getBPReg() || !MFI.hasVarSizedObjects());
1245
1246 // When using SP to access frame objects, we need to add RVV stack size.
1247 //
1248 // |--------------------------| -- <-- FP
1249 // | callee-allocated save | | <----|
1250 // | area for register varargs| | |
1251 // |--------------------------| | |
1252 // | callee-saved registers | | |
1253 // |--------------------------| -- |
1254 // | RVV alignment padding | | |
1255 // | (not counted in | | |
1256 // | MFI.getStackSize() but | | |
1257 // | counted in | | |
1258 // | RVFI.getRVVStackSize()) | | |
1259 // |--------------------------| -- |
1260 // | RVV objects | | |-- MFI.getStackSize()
1261 // | (not counted in | | |
1262 // | MFI.getStackSize()) | | |
1263 // |--------------------------| -- |
1264 // | padding before RVV | | |
1265 // | (not counted in | | |
1266 // | MFI.getStackSize()) | | |
1267 // |--------------------------| -- |
1268 // | scalar local variables | | <----'
1269 // |--------------------------| -- <-- BP (if var sized objects present)
1270 // | VarSize objects | |
1271 // |--------------------------| -- <-- SP
1272 //
1273 // The total amount of padding surrounding RVV objects is described by
1274 // RVFI->getRVVPadding() and it can be zero. It allows us to align the RVV
1275 // objects to the required alignment.
1276 if (MFI.getStackID(FI) == TargetStackID::Default) {
1277 if (MFI.isFixedObjectIndex(FI)) {
1278 assert(!RI->hasStackRealignment(MF) &&
1279 "Can't index across variable sized realign");
1280 Offset += StackOffset::get(getStackSizeWithRVVPadding(MF),
1281 RVFI->getRVVStackSize());
1282 } else {
1283 Offset += StackOffset::getFixed(MFI.getStackSize());
1284 }
1285 } else if (MFI.getStackID(FI) == TargetStackID::ScalableVector) {
1286 // Ensure the base of the RVV stack is correctly aligned: add on the
1287 // alignment padding.
1288 int ScalarLocalVarSize = MFI.getStackSize() -
1289 RVFI->getCalleeSavedStackSize() -
1290 RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
1291 Offset += StackOffset::get(ScalarLocalVarSize, RVFI->getRVVStackSize());
1292 }
1293 return Offset;
1294}
1295
1296void RISCVFrameLowering::determineCalleeSaves(MachineFunction &MF,
1297 BitVector &SavedRegs,
1298 RegScavenger *RS) const {
1299 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1300 // Unconditionally spill RA and FP only if the function uses a frame
1301 // pointer.
1302 if (hasFP(MF)) {
1303 SavedRegs.set(RAReg);
1304 SavedRegs.set(FPReg);
1305 }
1306 // Mark BP as used if function has dedicated base pointer.
1307 if (hasBP(MF))
1308 SavedRegs.set(RISCVABI::getBPReg());
1309
1310 // When using cm.push/pop we must save X27 if we save X26.
1311 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1312 if (RVFI->isPushable(MF) && SavedRegs.test(RISCV::X26))
1313 SavedRegs.set(RISCV::X27);
1314}
1315
1316std::pair<int64_t, Align>
1317RISCVFrameLowering::assignRVVStackObjectOffsets(MachineFunction &MF) const {
1318 MachineFrameInfo &MFI = MF.getFrameInfo();
1319 // Create a buffer of RVV objects to allocate.
1320 SmallVector<int, 8> ObjectsToAllocate;
1321 auto pushRVVObjects = [&](int FIBegin, int FIEnd) {
1322 for (int I = FIBegin, E = FIEnd; I != E; ++I) {
1323 unsigned StackID = MFI.getStackID(I);
1324 if (StackID != TargetStackID::ScalableVector)
1325 continue;
1326 if (MFI.isDeadObjectIndex(I))
1327 continue;
1328
1329 ObjectsToAllocate.push_back(I);
1330 }
1331 };
1332 // First push RVV Callee Saved object, then push RVV stack object
1333 std::vector<CalleeSavedInfo> &CSI = MF.getFrameInfo().getCalleeSavedInfo();
1334 const auto &RVVCSI = getRVVCalleeSavedInfo(MF, CSI);
1335 if (!RVVCSI.empty())
1336 pushRVVObjects(RVVCSI[0].getFrameIdx(),
1337 RVVCSI[RVVCSI.size() - 1].getFrameIdx() + 1);
1338 pushRVVObjects(0, MFI.getObjectIndexEnd() - RVVCSI.size());
1339
1340 // The minimum alignment is 16 bytes.
1341 Align RVVStackAlign(16);
1342 const auto &ST = MF.getSubtarget<RISCVSubtarget>();
1343
1344 if (!ST.hasVInstructions()) {
1345 assert(ObjectsToAllocate.empty() &&
1346 "Can't allocate scalable-vector objects without V instructions");
1347 return std::make_pair(0, RVVStackAlign);
1348 }
1349
1350 // Allocate all RVV locals and spills
1351 int64_t Offset = 0;
1352 for (int FI : ObjectsToAllocate) {
1353 // ObjectSize in bytes.
1354 int64_t ObjectSize = MFI.getObjectSize(FI);
1355 auto ObjectAlign =
1356 std::max(Align(RISCV::RVVBitsPerBlock / 8), MFI.getObjectAlign(FI));
1357 // If the data type is the fractional vector type, reserve one vector
1358 // register for it.
1359 if (ObjectSize < (RISCV::RVVBitsPerBlock / 8))
1360 ObjectSize = (RISCV::RVVBitsPerBlock / 8);
1361 Offset = alignTo(Offset + ObjectSize, ObjectAlign);
1362 MFI.setObjectOffset(FI, -Offset);
1363 // Update the maximum alignment of the RVV stack section
1364 RVVStackAlign = std::max(RVVStackAlign, ObjectAlign);
1365 }
1366
1367 uint64_t StackSize = Offset;
1368
1369 // Ensure the alignment of the RVV stack. Since we want the most-aligned
1370 // object right at the bottom (i.e., any padding at the top of the frame),
1371 // readjust all RVV objects down by the alignment padding.
1372 // Stack size and offsets are multiples of vscale; stack alignment is in
1373 // bytes. We can divide the stack alignment by the minimum vscale to get a
1374 // maximum stack alignment that is a multiple of vscale.
1375 auto VScale =
1376 std::max<uint64_t>(ST.getRealMinVLen() / RISCV::RVVBitsPerBlock, 1);
1377 if (auto RVVStackAlignVScale = RVVStackAlign.value() / VScale) {
1378 if (auto AlignmentPadding =
1379 offsetToAlignment(StackSize, Align(RVVStackAlignVScale))) {
1380 StackSize += AlignmentPadding;
1381 for (int FI : ObjectsToAllocate)
1382 MFI.setObjectOffset(FI, MFI.getObjectOffset(FI) - AlignmentPadding);
1383 }
1384 }
1385
1386 return std::make_pair(StackSize, RVVStackAlign);
1387}
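// Editor's note (illustrative, not part of the original source): the offsets
// assigned above are in vscale-relative bytes, so a whole-register spill
// occupies RVVBitsPerBlock / 8 = 8 such units; a fractional-LMUL object
// smaller than that (e.g. an mf2 spill of size 4) is rounded up to 8 and
// still reserves a full vector register slot.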
1388
1389static unsigned getScavSlotsNumForRVV(MachineFunction &MF) {
1390 // For RVV spills, computing scalable stack offsets requires up to two
1391 // scratch registers.
1392 static constexpr unsigned ScavSlotsNumRVVSpillScalableObject = 2;
1393
1394 // For RVV spills, computing non-scalable stack offsets requires up to one
1395 // scratch register.
1396 static constexpr unsigned ScavSlotsNumRVVSpillNonScalableObject = 1;
1397
1398 // The ADDI instruction's destination register can be used for computing
1399 // offsets, so scalable stack offsets require up to one scratch register.
1400 static constexpr unsigned ScavSlotsADDIScalableObject = 1;
1401
1402 static constexpr unsigned MaxScavSlotsNumKnown =
1403 std::max({ScavSlotsADDIScalableObject, ScavSlotsNumRVVSpillScalableObject,
1404 ScavSlotsNumRVVSpillNonScalableObject});
1405
1406 unsigned MaxScavSlotsNum = 0;
1407 if (!hasRVVFrameObject(MF))
1408 return false;
1409 for (const MachineBasicBlock &MBB : MF)
1410 for (const MachineInstr &MI : MBB) {
1411 bool IsRVVSpill = RISCV::isRVVSpill(MI);
1412 for (auto &MO : MI.operands()) {
1413 if (!MO.isFI())
1414 continue;
1415 bool IsScalableVectorID = MF.getFrameInfo().getStackID(MO.getIndex()) ==
1416 TargetStackID::ScalableVector;
1417 if (IsRVVSpill) {
1418 MaxScavSlotsNum = std::max(
1419 MaxScavSlotsNum, IsScalableVectorID
1420 ? ScavSlotsNumRVVSpillScalableObject
1421 : ScavSlotsNumRVVSpillNonScalableObject);
1422 } else if (MI.getOpcode() == RISCV::ADDI && IsScalableVectorID) {
1423 MaxScavSlotsNum =
1424 std::max(MaxScavSlotsNum, ScavSlotsADDIScalableObject);
1425 }
1426 }
1427 if (MaxScavSlotsNum == MaxScavSlotsNumKnown)
1428 return MaxScavSlotsNumKnown;
1429 }
1430 return MaxScavSlotsNum;
1431}
1432
1433static bool hasRVVFrameObject(const MachineFunction &MF) {
1434 // Originally, the function would scan all the stack objects to check whether
1435 // there is any scalable vector object on the stack or not. However, it
1436 // causes errors in the register allocator. In issue 53016, it returns false
1437 // before RA because there is no RVV stack objects. After RA, it returns true
1438 // because there are spilling slots for RVV values during RA. It will not
1439 // reserve BP during register allocation and generate BP access in the PEI
1440 // pass due to the inconsistent behavior of the function.
1441 //
1442 // The function is changed to use hasVInstructions() as the return value. It
1443 // is not precise, but it can make the register allocation correct.
1444 //
1445 // FIXME: Find a better way to make the decision or revisit the solution in
1446 // D103622.
1447 //
1448 // Refer to https://github.com/llvm/llvm-project/issues/53016.
1449 return MF.getSubtarget<RISCVSubtarget>().hasVInstructions();
1450}
1451
1452static unsigned estimateFunctionSizeInBytes(const MachineFunction &MF,
1453 const RISCVInstrInfo &TII) {
1454 unsigned FnSize = 0;
1455 for (auto &MBB : MF) {
1456 for (auto &MI : MBB) {
1457 // Far branches over a 20-bit offset will be relaxed in the branch relaxation
1458 // pass. In the worst case, conditional branches will be relaxed into
1459 // the following instruction sequence. Unconditional branches are
1460 // relaxed in the same way, with the exception that there is no first
1461 // branch instruction.
1462 //
1463 // foo
1464 // bne t5, t6, .rev_cond # `TII->getInstSizeInBytes(MI)` bytes
1465 // sd s11, 0(sp) # 4 bytes, or 2 bytes in RVC
1466 // jump .restore, s11 # 8 bytes
1467 // .rev_cond
1468 // bar
1469 // j .dest_bb # 4 bytes, or 2 bytes in RVC
1470 // .restore:
1471 // ld s11, 0(sp) # 4 bytes, or 2 bytes in RVC
1472 // .dest:
1473 // baz
1474 if (MI.isConditionalBranch())
1475 FnSize += TII.getInstSizeInBytes(MI);
1476 if (MI.isConditionalBranch() || MI.isUnconditionalBranch()) {
1477 if (MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca())
1478 FnSize += 2 + 8 + 2 + 2;
1479 else
1480 FnSize += 4 + 8 + 4 + 4;
1481 continue;
1482 }
1483
1484 FnSize += TII.getInstSizeInBytes(MI);
1485 }
1486 }
1487 return FnSize;
1488}
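// Editor's note (illustrative, not part of the original source): with the
// worst-case expansion described above, a relaxed conditional branch accounts
// for its own size plus 4 + 8 + 4 + 4 = 20 extra bytes without the compressed
// extension (2 + 8 + 2 + 2 = 14 with it), while an unconditional branch
// accounts for just the relaxed-sequence bytes.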
1489
1490void RISCVFrameLowering::processFunctionBeforeFrameFinalized(
1491 MachineFunction &MF, RegScavenger *RS) const {
1492 const RISCVRegisterInfo *RegInfo =
1493 MF.getSubtarget<RISCVSubtarget>().getRegisterInfo();
1494 const RISCVInstrInfo *TII = MF.getSubtarget<RISCVSubtarget>().getInstrInfo();
1495 MachineFrameInfo &MFI = MF.getFrameInfo();
1496 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
1497 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1498
1499 int64_t RVVStackSize;
1500 Align RVVStackAlign;
1501 std::tie(RVVStackSize, RVVStackAlign) = assignRVVStackObjectOffsets(MF);
1502
1503 RVFI->setRVVStackSize(RVVStackSize);
1504 RVFI->setRVVStackAlign(RVVStackAlign);
1505
1506 if (hasRVVFrameObject(MF)) {
1507 // Ensure the entire stack is aligned to at least the RVV requirement: some
1508 // scalable-vector object alignments are not considered by the
1509 // target-independent code.
1510 MFI.ensureMaxAlignment(RVVStackAlign);
1511 }
1512
1513 unsigned ScavSlotsNum = 0;
1514
1515 // estimateStackSize has been observed to under-estimate the final stack
1516 // size, so give ourselves wiggle-room by checking for a stack size
1517 // representable in an 11-bit signed field rather than 12 bits.
1518 if (!isInt<11>(MFI.estimateStackSize(MF)))
1519 ScavSlotsNum = 1;
1520
1521 // Far branches over a 20-bit offset require a spill slot for a scratch register.
1522 bool IsLargeFunction = !isInt<20>(estimateFunctionSizeInBytes(MF, *TII));
1523 if (IsLargeFunction)
1524 ScavSlotsNum = std::max(ScavSlotsNum, 1u);
1525
1526 // RVV loads & stores have no capacity to hold the immediate address offsets
1527 // so we must always reserve an emergency spill slot if the MachineFunction
1528 // contains any RVV spills.
1529 ScavSlotsNum = std::max(ScavSlotsNum, getScavSlotsNumForRVV(MF));
1530
1531 for (unsigned I = 0; I < ScavSlotsNum; I++) {
1532 int FI = MFI.CreateStackObject(RegInfo->getSpillSize(*RC),
1533 RegInfo->getSpillAlign(*RC), false);
1534 RS->addScavengingFrameIndex(FI);
1535
1536 if (IsLargeFunction && RVFI->getBranchRelaxationScratchFrameIndex() == -1)
1537 RVFI->setBranchRelaxationScratchFrameIndex(FI);
1538 }
1539
1540 unsigned Size = RVFI->getReservedSpillsSize();
1541 for (const auto &Info : MFI.getCalleeSavedInfo()) {
1542 int FrameIdx = Info.getFrameIdx();
1543 if (FrameIdx < 0 || MFI.getStackID(FrameIdx) != TargetStackID::Default)
1544 continue;
1545
1546 Size += MFI.getObjectSize(FrameIdx);
1547 }
1548 RVFI->setCalleeSavedStackSize(Size);
1549}
1550
1551// Do not preserve stack space within the prologue for outgoing variables when
1552// the function contains variable sized objects or there are vector objects
1553// accessed by the frame pointer.
1554// Let eliminateCallFramePseudoInstr preserve stack space for it.
1555bool RISCVFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
1556 return !MF.getFrameInfo().hasVarSizedObjects() &&
1557 !(hasFP(MF) && hasRVVFrameObject(MF));
1558}
1559
1560// Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions.
1561MachineBasicBlock::iterator RISCVFrameLowering::eliminateCallFramePseudoInstr(
1562 MachineFunction &MF, MachineBasicBlock &MBB,
1563 MachineBasicBlock::iterator MI) const {
1564 DebugLoc DL = MI->getDebugLoc();
1565
1566 if (!hasReservedCallFrame(MF)) {
1567 // If space has not been reserved for a call frame, ADJCALLSTACKDOWN and
1568 // ADJCALLSTACKUP must be converted to instructions manipulating the stack
1569 // pointer. This is necessary when there is a variable length stack
1570 // allocation (e.g. alloca), which means it's not possible to allocate
1571 // space for outgoing arguments from within the function prologue.
1572 int64_t Amount = MI->getOperand(0).getImm();
1573
1574 if (Amount != 0) {
1575 // Ensure the stack remains aligned after adjustment.
1576 Amount = alignSPAdjust(Amount);
1577
1578 if (MI->getOpcode() == RISCV::ADJCALLSTACKDOWN)
1579 Amount = -Amount;
1580
1581 const RISCVRegisterInfo &RI = *STI.getRegisterInfo();
1582 RI.adjustReg(MBB, MI, DL, SPReg, SPReg, StackOffset::getFixed(Amount),
1583 MachineInstr::NoFlags, getStackAlign());
1584 }
1585 }
1586
1587 return MBB.erase(MI);
1588}
1589
1590// We would like to split the SP adjustment so that the prologue/epilogue is
1591// emitted as the following instruction sequence. In this way, the offsets of
1592// the callee saved registers can fit in a single store. Suppose that the
1593// first SP adjustment amount is 2032.
1594// add sp,sp,-2032
1595// sw ra,2028(sp)
1596// sw s0,2024(sp)
1597// sw s1,2020(sp)
1598// sw s3,2012(sp)
1599// sw s4,2008(sp)
1600// add sp,sp,-64
1601uint64_t
1602RISCVFrameLowering::getFirstSPAdjustAmount(const MachineFunction &MF) const {
1603 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1604 const MachineFrameInfo &MFI = MF.getFrameInfo();
1605 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
1606 uint64_t StackSize = getStackSizeWithRVVPadding(MF);
1607
1608 // Disable SplitSPAdjust if save-restore libcall is used. The callee-saved
1609 // registers will be pushed by the save-restore libcalls, so we don't have to
1610 // split the SP adjustment in this case.
1611 if (RVFI->getReservedSpillsSize())
1612 return 0;
1613
1614 // Return the FirstSPAdjustAmount if the StackSize cannot fit in a signed
1615 // 12-bit immediate and there exists a callee-saved register needing to be pushed.
1616 if (!isInt<12>(StackSize) && (CSI.size() > 0)) {
1617 // FirstSPAdjustAmount is chosen at most as (2048 - StackAlign) because
1618 // 2048 will cause sp = sp + 2048 in the epilogue to be split into multiple
1619 // instructions. Offsets smaller than 2048 can fit in a single load/store
1620 // instruction, and we have to stick with the stack alignment. 2048 has
1621 // 16-byte alignment. The stack alignment for RV32 and RV64 is 16 and for
1622 // RV32E it is 4. So (2048 - StackAlign) will satisfy the stack alignment.
1623 const uint64_t StackAlign = getStackAlign().value();
1624
1625 // An amount of (2048 - StackAlign) will prevent the callee-save and restore
1626 // instructions from being compressed, so try to adjust the amount to the
1627 // largest offset that compressed stack-access instructions accept when the
1628 // target supports compression instructions.
1629 if (STI.hasStdExtCOrZca()) {
1630 // The compression extensions may support the following instructions:
1631 // riscv32: c.lwsp rd, offset[7:2] => 2^(6 + 2)
1632 // c.swsp rs2, offset[7:2] => 2^(6 + 2)
1633 // c.flwsp rd, offset[7:2] => 2^(6 + 2)
1634 // c.fswsp rs2, offset[7:2] => 2^(6 + 2)
1635 // riscv64: c.ldsp rd, offset[8:3] => 2^(6 + 3)
1636 // c.sdsp rs2, offset[8:3] => 2^(6 + 3)
1637 // c.fldsp rd, offset[8:3] => 2^(6 + 3)
1638 // c.fsdsp rs2, offset[8:3] => 2^(6 + 3)
1639 const uint64_t RVCompressLen = STI.getXLen() * 8;
1640 // Compared with amount (2048 - StackAlign), StackSize needs to
1641 // satisfy the following conditions to avoid using more instructions
1642 // to adjust the sp after adjusting the amount, such as
1643 // StackSize meets the condition (StackSize <= 2048 + RVCompressLen),
1644 // case1: Amount is 2048 - StackAlign: use addi + addi to adjust sp.
1645 // case2: Amount is RVCompressLen: use addi + addi to adjust sp.
1646 auto CanCompress = [&](uint64_t CompressLen) -> bool {
1647 if (StackSize <= 2047 + CompressLen ||
1648 (StackSize > 2048 * 2 - StackAlign &&
1649 StackSize <= 2047 * 2 + CompressLen) ||
1650 StackSize > 2048 * 3 - StackAlign)
1651 return true;
1652
1653 return false;
1654 };
1655 // In the epilogue, addi sp, sp, 496 is used to recover the sp and it
1656 // can be compressed(C.ADDI16SP, offset can be [-512, 496]), but
1657 // addi sp, sp, 512 can not be compressed. So try to use 496 first.
1658 const uint64_t ADDI16SPCompressLen = 496;
1659 if (STI.is64Bit() && CanCompress(ADDI16SPCompressLen))
1660 return ADDI16SPCompressLen;
1661 if (CanCompress(RVCompressLen))
1662 return RVCompressLen;
1663 }
1664 return 2048 - StackAlign;
1665 }
1666 return 0;
1667}
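// Editor's note (illustrative, not part of the original source): on RV64 with
// compressed instructions and callee-saved registers to push, RVCompressLen is
// 64 * 8 = 512, and a 2500-byte frame satisfies CanCompress(496)
// (2500 <= 2047 + 496), so the first SP adjustment is 496 and the remaining
// 2004 bytes are allocated after the callee-saved stores.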
1668
1669bool RISCVFrameLowering::assignCalleeSavedSpillSlots(
1670 MachineFunction &MF, const TargetRegisterInfo *TRI,
1671 std::vector<CalleeSavedInfo> &CSI, unsigned &MinCSFrameIndex,
1672 unsigned &MaxCSFrameIndex) const {
1673 // Early exit if no callee saved registers are modified!
1674 if (CSI.empty())
1675 return true;
1676
1677 auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
1678
1679 if (RVFI->isPushable(MF)) {
1680 // Determine how many GPRs we need to push and save it to RVFI.
1681 Register MaxReg = getMaxPushPopReg(MF, CSI);
1682 if (MaxReg != RISCV::NoRegister) {
1683 auto [RegEnc, PushedRegNum] = getPushPopEncodingAndNum(MaxReg);
1684 RVFI->setRVPushRegs(PushedRegNum);
1685 RVFI->setRVPushStackSize(alignTo((STI.getXLen() / 8) * PushedRegNum, 16));
1686
1687 // Use encoded number to represent registers to spill.
1688 RVFI->setRVPushRlist(RegEnc);
1689 }
1690 }
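 // Editorial example: pushing ra, s0 and s1 on RV64 gives PushedRegNum = 3,
 // so the push stack size is alignTo(8 * 3, 16) = 32 bytes.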
1691
1692 MachineFrameInfo &MFI = MF.getFrameInfo();
1693 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
1694
1695 for (auto &CS : CSI) {
1696 unsigned Reg = CS.getReg();
1697 const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
1698 unsigned Size = RegInfo->getSpillSize(*RC);
1699
1700 // This might need a fixed stack slot.
1701 if (RVFI->useSaveRestoreLibCalls(MF) || RVFI->isPushable(MF)) {
1702 const auto *FII = llvm::find_if(
1703 FixedCSRFIMap, [&](auto P) { return P.first == CS.getReg(); });
1704 if (FII != std::end(FixedCSRFIMap)) {
1705 int64_t Offset;
1706 if (RVFI->isPushable(MF))
1707 Offset = -((FII->second + RVFI->getRVPushRegs() + 1) * (int64_t)Size);
1708 else
1709 Offset = FII->second * (int64_t)Size;
1710
1711 int FrameIdx = MFI.CreateFixedSpillStackObject(Size, Offset);
1712 assert(FrameIdx < 0);
1713 CS.setFrameIdx(FrameIdx);
1714 continue;
1715 }
1716 }
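 // Editorial example of the fixed-slot offsets above, assuming XLEN = 64
 // (Size = 8): ra has a map value of -1, so its libcall slot is at
 // -1 * 8 = -8 from the incoming SP, while with a Zcmp push of three
 // registers it is at -((-1 + 3 + 1) * 8) = -24.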
1717
1718 // Not a fixed slot.
1719 Align Alignment = RegInfo->getSpillAlign(*RC);
1720 // We may not be able to satisfy the desired alignment specification of
1721 // the TargetRegisterClass if the stack alignment is smaller. Use the
1722 // min.
1723 Alignment = std::min(Alignment, getStackAlign());
1724 int FrameIdx = MFI.CreateStackObject(Size, Alignment, true);
1725 if ((unsigned)FrameIdx < MinCSFrameIndex)
1726 MinCSFrameIndex = FrameIdx;
1727 if ((unsigned)FrameIdx > MaxCSFrameIndex)
1728 MaxCSFrameIndex = FrameIdx;
1729 CS.setFrameIdx(FrameIdx);
1732 }
1733
1734 // Allocate a fixed object that covers the full push or libcall size.
1735 if (RVFI->isPushable(MF)) {
1736 if (int64_t PushSize = RVFI->getRVPushStackSize())
1737 MFI.CreateFixedSpillStackObject(PushSize, -PushSize);
1738 } else if (int LibCallRegs = getLibCallID(MF, CSI) + 1) {
1739 int64_t LibCallFrameSize =
1740 alignTo((STI.getXLen() / 8) * LibCallRegs, getStackAlign());
1741 MFI.CreateFixedSpillStackObject(LibCallFrameSize, -LibCallFrameSize);
1742 }
1743
1744 return true;
1745}
1746
1747bool RISCVFrameLowering::spillCalleeSavedRegisters(
1748 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1749 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
1750 if (CSI.empty())
1751 return true;
1752
1753 MachineFunction *MF = MBB.getParent();
1754 const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
1755 DebugLoc DL;
1756 if (MI != MBB.end() && !MI->isDebugInstr())
1757 DL = MI->getDebugLoc();
1758
1759 // Emit CM.PUSH with the encoded register list and a base SP adjustment
1759 // immediate of zero.
1761 if (RVFI->isPushable(*MF)) {
1762 unsigned PushedRegNum = RVFI->getRVPushRegs();
1763 if (PushedRegNum > 0) {
1764 // Use encoded number to represent registers to spill.
1765 int RegEnc = RVFI->getRVPushRlist();
1766 MachineInstrBuilder PushBuilder =
1767 BuildMI(MBB, MI, DL, TII.get(RISCV::CM_PUSH))
1769 PushBuilder.addImm((int64_t)RegEnc);
1770 PushBuilder.addImm(0);
1771
1772 for (unsigned i = 0; i < PushedRegNum; i++)
1773 PushBuilder.addUse(FixedCSRFIMap[i].first, RegState::Implicit);
1774 }
1775 } else if (const char *SpillLibCall = getSpillLibCallName(*MF, CSI)) {
1776 // Add spill libcall via non-callee-saved register t0.
1777 BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoCALLReg), RISCV::X5)
1778 .addExternalSymbol(SpillLibCall, RISCVII::MO_CALL)
1780
1781 // Add registers spilled in libcall as liveins.
1782 for (auto &CS : CSI)
1783 MBB.addLiveIn(CS.getReg());
1784 }
1785
1786 // Manually spill values not spilled by libcall & Push/Pop.
1787 const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
1788 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
1789
1790 auto storeRegsToStackSlots = [&](decltype(UnmanagedCSI) CSInfo) {
1791 for (auto &CS : CSInfo) {
1792 // Insert the spill to the stack frame.
1793 Register Reg = CS.getReg();
1794 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1795 TII.storeRegToStackSlot(MBB, MI, Reg, !MBB.isLiveIn(Reg),
1796 CS.getFrameIdx(), RC, TRI, Register());
1797 }
1798 };
1799 storeRegsToStackSlots(UnmanagedCSI);
1800 storeRegsToStackSlots(RVVCSI);
1801
1802 return true;
1803}
1804
1805static unsigned getCalleeSavedRVVNumRegs(const Register &BaseReg) {
1806 return RISCV::VRRegClass.contains(BaseReg) ? 1
1807 : RISCV::VRM2RegClass.contains(BaseReg) ? 2
1808 : RISCV::VRM4RegClass.contains(BaseReg) ? 4
1809 : 8;
1810}
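// Editorial note: a register group spans consecutive VR registers, e.g. an
// LMUL=4 group such as V8M4 covers v8..v11, so the CFI emission below issues
// one entry per component register.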
1811
1812static MCRegister getRVVBaseRegister(const RISCVRegisterInfo &TRI,
1813 const Register &Reg) {
1814 MCRegister BaseReg = TRI.getSubReg(Reg, RISCV::sub_vrm1_0);
1815 // If it is not a grouped vector register, it has no subregisters, so the
1816 // base register is just the register itself.
1817 if (BaseReg == RISCV::NoRegister)
1818 BaseReg = Reg;
1819 return BaseReg;
1820}
1821
1822void RISCVFrameLowering::emitCalleeSavedRVVPrologCFI(
1825 const MachineFrameInfo &MFI = MF->getFrameInfo();
1827 const TargetInstrInfo &TII = *STI.getInstrInfo();
1830
1831 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
1832 if (RVVCSI.empty())
1833 return;
1834
1835 uint64_t FixedSize = getStackSizeWithRVVPadding(*MF);
1836 if (!HasFP) {
1837 uint64_t ScalarLocalVarSize =
1838 MFI.getStackSize() - RVFI->getCalleeSavedStackSize() -
1839 RVFI->getVarArgsSaveSize() + RVFI->getRVVPadding();
1840 FixedSize -= ScalarLocalVarSize;
1841 }
1842
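 // Editorial note: the scalable part of an RVV object offset is expressed in
 // 8-byte blocks (RVVBitsPerBlock is 64 bits), so dividing the object offset
 // by 8 below yields a multiple of VLENB for the CFI expression.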
1843 for (auto &CS : RVVCSI) {
1844 // Insert the spill to the stack frame.
1845 int FI = CS.getFrameIdx();
1846 MCRegister BaseReg = getRVVBaseRegister(TRI, CS.getReg());
1847 unsigned NumRegs = getCalleeSavedRVVNumRegs(CS.getReg());
1848 for (unsigned i = 0; i < NumRegs; ++i) {
1849 unsigned CFIIndex = MF->addFrameInst(createDefCFAOffset(
1850 TRI, BaseReg + i, -FixedSize, MFI.getObjectOffset(FI) / 8 + i));
1851 BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
1852 .addCFIIndex(CFIIndex)
1854 }
1855 }
1856}
1857
1858void RISCVFrameLowering::emitCalleeSavedRVVEpilogCFI(
1861 const MachineFrameInfo &MFI = MF->getFrameInfo();
1863 const TargetInstrInfo &TII = *STI.getInstrInfo();
1866
1867 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, MFI.getCalleeSavedInfo());
1868 for (auto &CS : RVVCSI) {
1869 MCRegister BaseReg = getRVVBaseRegister(TRI, CS.getReg());
1870 unsigned NumRegs = getCalleeSavedRVVNumRegs(CS.getReg());
1871 for (unsigned i = 0; i < NumRegs; ++i) {
1872 unsigned CFIIndex = MF->addFrameInst(MCCFIInstruction::createRestore(
1873 nullptr, RI->getDwarfRegNum(BaseReg + i, true)));
1874 BuildMI(MBB, MI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
1875 .addCFIIndex(CFIIndex)
1877 }
1878 }
1879}
1880
1881bool RISCVFrameLowering::restoreCalleeSavedRegisters(
1882 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
1883 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
1884 if (CSI.empty())
1885 return true;
1886
1887 MachineFunction *MF = MBB.getParent();
1888 const TargetInstrInfo &TII = *MF->getSubtarget().getInstrInfo();
1889 DebugLoc DL;
1890 if (MI != MBB.end() && !MI->isDebugInstr())
1891 DL = MI->getDebugLoc();
1892
1893 // Manually restore values not restored by libcall & Push/Pop.
1894 // Restore in the reverse order of the spill in the epilogue. In addition,
1895 // the return address is restored first in the epilogue, which increases
1896 // the opportunity to avoid a load-to-use data hazard between loading RA
1897 // and returning through RA. Note that loadRegFromStackSlot can insert
1898 // multiple instructions.
1899 const auto &UnmanagedCSI = getUnmanagedCSI(*MF, CSI);
1900 const auto &RVVCSI = getRVVCalleeSavedInfo(*MF, CSI);
1901
1902 auto loadRegFromStackSlot = [&](decltype(UnmanagedCSI) CSInfo) {
1903 for (auto &CS : CSInfo) {
1904 Register Reg = CS.getReg();
1905 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1906 TII.loadRegFromStackSlot(MBB, MI, Reg, CS.getFrameIdx(), RC, TRI,
1907 Register());
1908 assert(MI != MBB.begin() &&
1909 "loadRegFromStackSlot didn't insert any code!");
1910 }
1911 };
1912 loadRegFromStackSlot(RVVCSI);
1913 loadRegFromStackSlot(UnmanagedCSI);
1914
1916 if (RVFI->isPushable(*MF)) {
1917 int RegEnc = RVFI->getRVPushRlist();
1919 MachineInstrBuilder PopBuilder =
1920 BuildMI(MBB, MI, DL, TII.get(RISCV::CM_POP))
1922 // Use encoded number to represent registers to restore.
1923 PopBuilder.addImm(RegEnc);
1924 PopBuilder.addImm(0);
1925
1926 for (unsigned i = 0; i < RVFI->getRVPushRegs(); i++)
1927 PopBuilder.addDef(FixedCSRFIMap[i].first, RegState::ImplicitDefine);
1928 }
1929 } else {
1930 const char *RestoreLibCall = getRestoreLibCallName(*MF, CSI);
1931 if (RestoreLibCall) {
1932 // Add restore libcall via tail call.
1933 MachineInstr *NewMI =
1934 BuildMI(MBB, MI, DL, TII.get(RISCV::PseudoTAIL))
1935 .addExternalSymbol(RestoreLibCall, RISCVII::MO_CALL)
1937
1938 // Remove trailing returns, since the terminator is now a tail call to the
1939 // restore function.
1940 if (MI != MBB.end() && MI->getOpcode() == RISCV::PseudoRET) {
1941 NewMI->copyImplicitOps(*MF, *MI);
1942 MI->eraseFromParent();
1943 }
1944 }
1945 }
1946 return true;
1947}
1948
1949bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
1950 // Keep the conventional code flow when not optimizing.
1951 if (MF.getFunction().hasOptNone())
1952 return false;
1953
1954 return true;
1955}
1956
1957bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
1958 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
1959 const MachineFunction *MF = MBB.getParent();
1960 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
1961
1962 if (!RVFI->useSaveRestoreLibCalls(*MF))
1963 return true;
1964
1965 // Inserting a call to a __riscv_save libcall requires the use of the register
1966 // t0 (X5) to hold the return address. Therefore if this register is already
1967 // used we can't insert the call.
1968
1969 RegScavenger RS;
1970 RS.enterBasicBlock(*TmpMBB);
1971 return !RS.isRegUsed(RISCV::X5);
1972}
1973
1974bool RISCVFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
1975 const MachineFunction *MF = MBB.getParent();
1976 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
1977 const auto *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
1978
1979 if (!RVFI->useSaveRestoreLibCalls(*MF))
1980 return true;
1981
1983 // Using the __riscv_restore libcalls to restore CSRs requires a tail call.
1984 // This means that if we still need to continue executing code within this
1985 // function, the restore cannot take place in this basic block.
1985
1986 if (MBB.succ_size() > 1)
1987 return false;
1988
1989 MachineBasicBlock *SuccMBB =
1990 MBB.succ_empty() ? TmpMBB->getFallThrough() : *MBB.succ_begin();
1991
1992 // Doing a tail call should be safe if there are no successors, because either
1993 // we have a returning block or the end of the block is unreachable, so the
1994 // restore will be eliminated regardless.
1995 if (!SuccMBB)
1996 return true;
1997
1998 // The successor can only contain a return, since we would effectively be
1999 // replacing the successor with our own tail return at the end of our block.
2000 return SuccMBB->isReturnBlock() && SuccMBB->size() == 1;
2001}
2002
2004 switch (ID) {
2007 return true;
2011 return false;
2012 }
2013 llvm_unreachable("Invalid TargetStackID::Value");
2014}
2015
2016TargetStackID::Value RISCVFrameLowering::getStackIDForScalableVectors() const {
2017 return TargetStackID::ScalableVector;
2018}
2019
2020// Synthesize the probe loop.
2021static void emitStackProbeInline(MachineFunction &MF, MachineBasicBlock &MBB,
2022 MachineBasicBlock::iterator MBBI,
2023 DebugLoc DL) {
2024
2025 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
2026 const RISCVInstrInfo *TII = Subtarget.getInstrInfo();
2027 bool IsRV64 = Subtarget.is64Bit();
2028 Align StackAlign = Subtarget.getFrameLowering()->getStackAlign();
2029 const RISCVTargetLowering *TLI = Subtarget.getTargetLowering();
2030 uint64_t ProbeSize = TLI->getStackProbeSize(MF, StackAlign);
2031
2032 MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
2033 MachineBasicBlock *LoopTestMBB =
2034 MF.CreateMachineBasicBlock(MBB.getBasicBlock());
2035 MF.insert(MBBInsertPoint, LoopTestMBB);
2036 MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(MBB.getBasicBlock());
2037 MF.insert(MBBInsertPoint, ExitMBB);
2038 MachineInstr::MIFlag Flags = MachineInstr::FrameSetup;
2039 Register TargetReg = RISCV::X6;
2040 Register ScratchReg = RISCV::X7;
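 // Editorial sketch of the emitted probe sequence (t1/t2 are the ABI names
 // of X6/X7, the target and scratch registers above):
 //   li   t2, ProbeSize
 // LoopTest:
 //   sub  sp, sp, t2
 //   sd/sw zero, 0(sp)
 //   bne  sp, t1, LoopTest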
2041
2042 // ScratchReg = ProbeSize
2043 TII->movImm(MBB, MBBI, DL, ScratchReg, ProbeSize, Flags);
2044
2045 // LoopTest:
2046 // SUB SP, SP, ProbeSize
2047 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::SUB), SPReg)
2048 .addReg(SPReg)
2049 .addReg(ScratchReg)
2050 .setMIFlags(Flags);
2051
2052 // s[d|w] zero, 0(sp)
2053 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL,
2054 TII->get(IsRV64 ? RISCV::SD : RISCV::SW))
2055 .addReg(RISCV::X0)
2056 .addReg(SPReg)
2057 .addImm(0)
2058 .setMIFlags(Flags);
2059
2060 // BNE SP, TargetReg, LoopTest
2061 BuildMI(*LoopTestMBB, LoopTestMBB->end(), DL, TII->get(RISCV::BNE))
2062 .addReg(SPReg)
2063 .addReg(TargetReg)
2064 .addMBB(LoopTestMBB)
2065 .setMIFlags(Flags);
2066
2067 ExitMBB->splice(ExitMBB->end(), &MBB, std::next(MBBI), MBB.end());
2068
2069 LoopTestMBB->addSuccessor(ExitMBB);
2070 LoopTestMBB->addSuccessor(LoopTestMBB);
2071 MBB.addSuccessor(LoopTestMBB);
2072 // Update liveins.
2073 fullyRecomputeLiveIns({ExitMBB, LoopTestMBB});
2074}
2075
2076void RISCVFrameLowering::inlineStackProbe(MachineFunction &MF,
2077 MachineBasicBlock &MBB) const {
2078 auto Where = llvm::find_if(MBB, [](MachineInstr &MI) {
2079 return MI.getOpcode() == RISCV::PROBED_STACKALLOC;
2080 });
2081 if (Where != MBB.end()) {
2082 DebugLoc DL = MBB.findDebugLoc(Where);
2083 emitStackProbeInline(MF, MBB, Where, DL);
2084 Where->eraseFromParent();
2085 }
2086}