// RISCVRegisterInfo.cpp — extracted from LLVM 20.0.0git doxygen pages;
// navigation text ("Go to the documentation of this file.") removed.
//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//
#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;
32static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
33 cl::init(false), cl::Hidden);
34static cl::opt<bool>
35 DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
36 cl::init(false),
37 cl::desc("Disable two address hints for register "
38 "allocation"));
39
40static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
41static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
42static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
43static_assert(RISCV::F31_H == RISCV::F0_H + 31,
44 "Register list not consecutive");
45static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
46static_assert(RISCV::F31_F == RISCV::F0_F + 31,
47 "Register list not consecutive");
48static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
49static_assert(RISCV::F31_D == RISCV::F0_D + 31,
50 "Register list not consecutive");
51static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
52static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
53
55 : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
56 /*PC*/0, HwMode) {}
57
58const MCPhysReg *
60 auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
62 return CSR_NoRegs_SaveList;
63 if (MF->getFunction().hasFnAttribute("interrupt")) {
64 if (Subtarget.hasStdExtD())
65 return CSR_XLEN_F64_Interrupt_SaveList;
66 if (Subtarget.hasStdExtF())
67 return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
68 : CSR_XLEN_F32_Interrupt_SaveList;
69 return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
70 : CSR_Interrupt_SaveList;
71 }
72
73 bool HasVectorCSR =
75 Subtarget.hasVInstructions();
76
77 switch (Subtarget.getTargetABI()) {
78 default:
79 llvm_unreachable("Unrecognized ABI");
82 return CSR_ILP32E_LP64E_SaveList;
85 if (HasVectorCSR)
86 return CSR_ILP32_LP64_V_SaveList;
87 return CSR_ILP32_LP64_SaveList;
90 if (HasVectorCSR)
91 return CSR_ILP32F_LP64F_V_SaveList;
92 return CSR_ILP32F_LP64F_SaveList;
95 if (HasVectorCSR)
96 return CSR_ILP32D_LP64D_V_SaveList;
97 return CSR_ILP32D_LP64D_SaveList;
98 }
99}
100
102 const RISCVFrameLowering *TFI = getFrameLowering(MF);
103 BitVector Reserved(getNumRegs());
104 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
105
106 for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
107 // Mark any GPRs requested to be reserved as such
108 if (Subtarget.isRegisterReservedByUser(Reg))
109 markSuperRegs(Reserved, Reg);
110
111 // Mark all the registers defined as constant in TableGen as reserved.
112 if (isConstantPhysReg(Reg))
113 markSuperRegs(Reserved, Reg);
114 }
115
116 // Use markSuperRegs to ensure any register aliases are also reserved
117 markSuperRegs(Reserved, RISCV::X2_H); // sp
118 markSuperRegs(Reserved, RISCV::X3_H); // gp
119 markSuperRegs(Reserved, RISCV::X4_H); // tp
120 if (TFI->hasFP(MF))
121 markSuperRegs(Reserved, RISCV::X8_H); // fp
122 // Reserve the base register if we need to realign the stack and allocate
123 // variable-sized objects at runtime.
124 if (TFI->hasBP(MF))
125 markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp
126
127 // Additionally reserve dummy register used to form the register pair
128 // beginning with 'x0' for instructions that take register pairs.
129 markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);
130
131 // There are only 16 GPRs for RVE.
132 if (Subtarget.hasStdExtE())
133 for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)
134 markSuperRegs(Reserved, Reg);
135
136 // V registers for code generation. We handle them manually.
137 markSuperRegs(Reserved, RISCV::VL);
138 markSuperRegs(Reserved, RISCV::VTYPE);
139 markSuperRegs(Reserved, RISCV::VXSAT);
140 markSuperRegs(Reserved, RISCV::VXRM);
141
142 // Floating point environment registers.
143 markSuperRegs(Reserved, RISCV::FRM);
144 markSuperRegs(Reserved, RISCV::FFLAGS);
145
146 // SiFive VCIX state registers.
147 markSuperRegs(Reserved, RISCV::SF_VCIX_STATE);
148
150 if (Subtarget.hasStdExtE())
151 report_fatal_error("Graal reserved registers do not exist in RVE");
152 markSuperRegs(Reserved, RISCV::X23_H);
153 markSuperRegs(Reserved, RISCV::X27_H);
154 }
155
156 // Shadow stack pointer.
157 markSuperRegs(Reserved, RISCV::SSP);
158
159 assert(checkAllSuperRegsMarked(Reserved));
160 return Reserved;
161}
162
164 MCRegister PhysReg) const {
165 return !MF.getSubtarget().isRegisterReservedByUser(PhysReg);
166}
167
169 return CSR_NoRegs_RegMask;
170}
171
174 const DebugLoc &DL, Register DestReg,
177 MaybeAlign RequiredAlign) const {
178
179 if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
180 return;
181
185 const RISCVInstrInfo *TII = ST.getInstrInfo();
186
187 // Optimize compile time offset case
188 if (Offset.getScalable()) {
189 if (auto VLEN = ST.getRealVLen()) {
190 // 1. Multiply the number of v-slots by the (constant) length of register
191 const int64_t VLENB = *VLEN / 8;
192 assert(Offset.getScalable() % (RISCV::RVVBitsPerBlock / 8) == 0 &&
193 "Reserve the stack by the multiple of one vector size.");
194 const int64_t NumOfVReg = Offset.getScalable() / 8;
195 const int64_t FixedOffset = NumOfVReg * VLENB;
196 if (!isInt<32>(FixedOffset)) {
198 "Frame size outside of the signed 32-bit range not supported");
199 }
200 Offset = StackOffset::getFixed(FixedOffset + Offset.getFixed());
201 }
202 }
203
204 bool KillSrcReg = false;
205
206 if (Offset.getScalable()) {
207 unsigned ScalableAdjOpc = RISCV::ADD;
208 int64_t ScalableValue = Offset.getScalable();
209 if (ScalableValue < 0) {
210 ScalableValue = -ScalableValue;
211 ScalableAdjOpc = RISCV::SUB;
212 }
213 // Get vlenb and multiply vlen with the number of vector registers.
214 Register ScratchReg = DestReg;
215 if (DestReg == SrcReg)
216 ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
217
218 assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
219 assert(ScalableValue % (RISCV::RVVBitsPerBlock / 8) == 0 &&
220 "Reserve the stack by the multiple of one vector size.");
221 assert(isInt<32>(ScalableValue / (RISCV::RVVBitsPerBlock / 8)) &&
222 "Expect the number of vector registers within 32-bits.");
223 uint32_t NumOfVReg = ScalableValue / (RISCV::RVVBitsPerBlock / 8);
224 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
225 .setMIFlag(Flag);
226
227 if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
228 (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
229 unsigned Opc = NumOfVReg == 2 ? RISCV::SH1ADD :
230 (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
231 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
232 .addReg(ScratchReg, RegState::Kill).addReg(SrcReg)
233 .setMIFlag(Flag);
234 } else {
235 TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
236 BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
237 .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
238 .setMIFlag(Flag);
239 }
240 SrcReg = DestReg;
241 KillSrcReg = true;
242 }
243
244 int64_t Val = Offset.getFixed();
245 if (DestReg == SrcReg && Val == 0)
246 return;
247
248 const uint64_t Align = RequiredAlign.valueOrOne().value();
249
250 if (isInt<12>(Val)) {
251 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
252 .addReg(SrcReg, getKillRegState(KillSrcReg))
253 .addImm(Val)
254 .setMIFlag(Flag);
255 return;
256 }
257
258 // Try to split the offset across two ADDIs. We need to keep the intermediate
259 // result aligned after each ADDI. We need to determine the maximum value we
260 // can put in each ADDI. In the negative direction, we can use -2048 which is
261 // always sufficiently aligned. In the positive direction, we need to find the
262 // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
263 // created with LUI.
264 assert(Align < 2048 && "Required alignment too large");
265 int64_t MaxPosAdjStep = 2048 - Align;
266 if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
267 int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
268 Val -= FirstAdj;
269 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
270 .addReg(SrcReg, getKillRegState(KillSrcReg))
271 .addImm(FirstAdj)
272 .setMIFlag(Flag);
273 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
274 .addReg(DestReg, RegState::Kill)
275 .addImm(Val)
276 .setMIFlag(Flag);
277 return;
278 }
279
280 // Use shNadd if doing so lets us materialize a 12 bit immediate with a single
281 // instruction. This saves 1 instruction over the full lui/addi+add fallback
282 // path. We avoid anything which can be done with a single lui as it might
283 // be compressible. Note that the sh1add case is fully covered by the 2x addi
284 // case just above and is thus ommitted.
285 if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
286 unsigned Opc = 0;
287 if (isShiftedInt<12, 3>(Val)) {
288 Opc = RISCV::SH3ADD;
289 Val = Val >> 3;
290 } else if (isShiftedInt<12, 2>(Val)) {
291 Opc = RISCV::SH2ADD;
292 Val = Val >> 2;
293 }
294 if (Opc) {
295 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
296 TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
297 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
298 .addReg(ScratchReg, RegState::Kill)
299 .addReg(SrcReg, getKillRegState(KillSrcReg))
300 .setMIFlag(Flag);
301 return;
302 }
303 }
304
305 unsigned Opc = RISCV::ADD;
306 if (Val < 0) {
307 Val = -Val;
308 Opc = RISCV::SUB;
309 }
310
311 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
312 TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
313 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
314 .addReg(SrcReg, getKillRegState(KillSrcReg))
315 .addReg(ScratchReg, RegState::Kill)
316 .setMIFlag(Flag);
317}
318
319// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
320// LMUL*VLENB bytes.
322 DebugLoc DL = II->getDebugLoc();
323 MachineBasicBlock &MBB = *II->getParent();
326 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
327 const TargetInstrInfo *TII = STI.getInstrInfo();
329
330 auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
331 unsigned NF = ZvlssegInfo->first;
332 unsigned LMUL = ZvlssegInfo->second;
333 assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
334 unsigned Opcode, SubRegIdx;
335 switch (LMUL) {
336 default:
337 llvm_unreachable("LMUL must be 1, 2, or 4.");
338 case 1:
339 Opcode = RISCV::VS1R_V;
340 SubRegIdx = RISCV::sub_vrm1_0;
341 break;
342 case 2:
343 Opcode = RISCV::VS2R_V;
344 SubRegIdx = RISCV::sub_vrm2_0;
345 break;
346 case 4:
347 Opcode = RISCV::VS4R_V;
348 SubRegIdx = RISCV::sub_vrm4_0;
349 break;
350 }
351 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
352 "Unexpected subreg numbering");
353 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
354 "Unexpected subreg numbering");
355 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
356 "Unexpected subreg numbering");
357
358 Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
359 // Optimize for constant VLEN.
360 if (auto VLEN = STI.getRealVLen()) {
361 const int64_t VLENB = *VLEN / 8;
362 int64_t Offset = VLENB * LMUL;
363 STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
364 } else {
365 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
366 uint32_t ShiftAmount = Log2_32(LMUL);
367 if (ShiftAmount != 0)
368 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
369 .addReg(VL)
370 .addImm(ShiftAmount);
371 }
372
373 Register SrcReg = II->getOperand(0).getReg();
374 Register Base = II->getOperand(1).getReg();
375 bool IsBaseKill = II->getOperand(1).isKill();
376 Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
377 for (unsigned I = 0; I < NF; ++I) {
378 // Adding implicit-use of super register to describe we are using part of
379 // super register, that prevents machine verifier complaining when part of
380 // subreg is undef, see comment in MachineVerifier::checkLiveness for more
381 // detail.
382 BuildMI(MBB, II, DL, TII->get(Opcode))
383 .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
384 .addReg(Base, getKillRegState(I == NF - 1))
385 .addMemOperand(*(II->memoperands_begin()))
386 .addReg(SrcReg, RegState::Implicit);
387 if (I != NF - 1)
388 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
389 .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
390 .addReg(VL, getKillRegState(I == NF - 2));
391 Base = NewBase;
392 }
393 II->eraseFromParent();
394}
395
396// Split a VSPILLx_Mx pseudo into multiple whole register loads separated by
397// LMUL*VLENB bytes.
399 DebugLoc DL = II->getDebugLoc();
400 MachineBasicBlock &MBB = *II->getParent();
403 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
404 const TargetInstrInfo *TII = STI.getInstrInfo();
406
407 auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
408 unsigned NF = ZvlssegInfo->first;
409 unsigned LMUL = ZvlssegInfo->second;
410 assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
411 unsigned Opcode, SubRegIdx;
412 switch (LMUL) {
413 default:
414 llvm_unreachable("LMUL must be 1, 2, or 4.");
415 case 1:
416 Opcode = RISCV::VL1RE8_V;
417 SubRegIdx = RISCV::sub_vrm1_0;
418 break;
419 case 2:
420 Opcode = RISCV::VL2RE8_V;
421 SubRegIdx = RISCV::sub_vrm2_0;
422 break;
423 case 4:
424 Opcode = RISCV::VL4RE8_V;
425 SubRegIdx = RISCV::sub_vrm4_0;
426 break;
427 }
428 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
429 "Unexpected subreg numbering");
430 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
431 "Unexpected subreg numbering");
432 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
433 "Unexpected subreg numbering");
434
435 Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
436 // Optimize for constant VLEN.
437 if (auto VLEN = STI.getRealVLen()) {
438 const int64_t VLENB = *VLEN / 8;
439 int64_t Offset = VLENB * LMUL;
440 STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
441 } else {
442 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
443 uint32_t ShiftAmount = Log2_32(LMUL);
444 if (ShiftAmount != 0)
445 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
446 .addReg(VL)
447 .addImm(ShiftAmount);
448 }
449
450 Register DestReg = II->getOperand(0).getReg();
451 Register Base = II->getOperand(1).getReg();
452 bool IsBaseKill = II->getOperand(1).isKill();
453 Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
454 for (unsigned I = 0; I < NF; ++I) {
455 BuildMI(MBB, II, DL, TII->get(Opcode),
456 TRI->getSubReg(DestReg, SubRegIdx + I))
457 .addReg(Base, getKillRegState(I == NF - 1))
458 .addMemOperand(*(II->memoperands_begin()));
459 if (I != NF - 1)
460 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
461 .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
462 .addReg(VL, getKillRegState(I == NF - 2));
463 Base = NewBase;
464 }
465 II->eraseFromParent();
466}
467
469 int SPAdj, unsigned FIOperandNum,
470 RegScavenger *RS) const {
471 assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
472
473 MachineInstr &MI = *II;
474 MachineFunction &MF = *MI.getParent()->getParent();
476 DebugLoc DL = MI.getDebugLoc();
477
478 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
479 Register FrameReg;
481 getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
482 bool IsRVVSpill = RISCV::isRVVSpill(MI);
483 if (!IsRVVSpill)
484 Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
485
486 if (!isInt<32>(Offset.getFixed())) {
488 "Frame offsets outside of the signed 32-bit range not supported");
489 }
490
491 if (!IsRVVSpill) {
492 int64_t Val = Offset.getFixed();
493 int64_t Lo12 = SignExtend64<12>(Val);
494 unsigned Opc = MI.getOpcode();
495 if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
496 // We chose to emit the canonical immediate sequence rather than folding
497 // the offset into the using add under the theory that doing so doesn't
498 // save dynamic instruction count and some target may fuse the canonical
499 // 32 bit immediate sequence. We still need to clear the portion of the
500 // offset encoded in the immediate.
501 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
502 } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
503 Opc == RISCV::PREFETCH_W) &&
504 (Lo12 & 0b11111) != 0) {
505 // Prefetch instructions require the offset to be 32 byte aligned.
506 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
507 } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
508 Opc == RISCV::PseudoRV32ZdinxSD) &&
509 Lo12 >= 2044) {
510 // This instruction will be split into 2 instructions. The second
511 // instruction will add 4 to the immediate. If that would overflow 12
512 // bits, we can't fold the offset.
513 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
514 } else {
515 // We can encode an add with 12 bit signed immediate in the immediate
516 // operand of our user instruction. As a result, the remaining
517 // offset can by construction, at worst, a LUI and a ADD.
518 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
520 Offset.getScalable());
521 }
522 }
523
524 if (Offset.getScalable() || Offset.getFixed()) {
525 Register DestReg;
526 if (MI.getOpcode() == RISCV::ADDI)
527 DestReg = MI.getOperand(0).getReg();
528 else
529 DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
530 adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
531 MachineInstr::NoFlags, std::nullopt);
532 MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
533 /*IsImp*/false,
534 /*IsKill*/true);
535 } else {
536 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
537 /*IsImp*/false,
538 /*IsKill*/false);
539 }
540
541 // If after materializing the adjustment, we have a pointless ADDI, remove it
542 if (MI.getOpcode() == RISCV::ADDI &&
543 MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
544 MI.getOperand(2).getImm() == 0) {
545 MI.eraseFromParent();
546 return true;
547 }
548
549 // Handle spill/fill of synthetic register classes for segment operations to
550 // ensure correctness in the edge case one gets spilled. There are many
551 // possible optimizations here, but given the extreme rarity of such spills,
552 // we prefer simplicity of implementation for now.
553 switch (MI.getOpcode()) {
554 case RISCV::PseudoVSPILL2_M1:
555 case RISCV::PseudoVSPILL2_M2:
556 case RISCV::PseudoVSPILL2_M4:
557 case RISCV::PseudoVSPILL3_M1:
558 case RISCV::PseudoVSPILL3_M2:
559 case RISCV::PseudoVSPILL4_M1:
560 case RISCV::PseudoVSPILL4_M2:
561 case RISCV::PseudoVSPILL5_M1:
562 case RISCV::PseudoVSPILL6_M1:
563 case RISCV::PseudoVSPILL7_M1:
564 case RISCV::PseudoVSPILL8_M1:
566 return true;
567 case RISCV::PseudoVRELOAD2_M1:
568 case RISCV::PseudoVRELOAD2_M2:
569 case RISCV::PseudoVRELOAD2_M4:
570 case RISCV::PseudoVRELOAD3_M1:
571 case RISCV::PseudoVRELOAD3_M2:
572 case RISCV::PseudoVRELOAD4_M1:
573 case RISCV::PseudoVRELOAD4_M2:
574 case RISCV::PseudoVRELOAD5_M1:
575 case RISCV::PseudoVRELOAD6_M1:
576 case RISCV::PseudoVRELOAD7_M1:
577 case RISCV::PseudoVRELOAD8_M1:
579 return true;
580 }
581
582 return false;
583}
584
586 const MachineFunction &MF) const {
587 return true;
588}
589
590// Returns true if the instruction's frame index reference would be better
591// served by a base register other than FP or SP.
592// Used by LocalStackSlotAllocation pass to determine which frame index
593// references it should create new base registers for.
595 int64_t Offset) const {
596 unsigned FIOperandNum = 0;
597 for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
598 assert(FIOperandNum < MI->getNumOperands() &&
599 "Instr doesn't have FrameIndex operand");
600
601 // For RISC-V, The machine instructions that include a FrameIndex operand
602 // are load/store, ADDI instructions.
603 unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
604 if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
605 return false;
606 // We only generate virtual base registers for loads and stores, so
607 // return false for everything else.
608 if (!MI->mayLoad() && !MI->mayStore())
609 return false;
610
611 const MachineFunction &MF = *MI->getMF();
612 const MachineFrameInfo &MFI = MF.getFrameInfo();
613 const RISCVFrameLowering *TFI = getFrameLowering(MF);
614 const MachineRegisterInfo &MRI = MF.getRegInfo();
615
616 if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
617 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
618 // Estimate the stack size used to store callee saved registers(
619 // excludes reserved registers).
620 unsigned CalleeSavedSize = 0;
621 for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
622 ++R) {
623 if (Subtarget.isRegisterReservedByUser(Reg))
624 continue;
625
626 if (RISCV::GPRRegClass.contains(Reg))
627 CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
628 else if (RISCV::FPR64RegClass.contains(Reg))
629 CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
630 else if (RISCV::FPR32RegClass.contains(Reg))
631 CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
632 // Ignore vector registers.
633 }
634
635 int64_t MaxFPOffset = Offset - CalleeSavedSize;
636 return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
637 }
638
639 // Assume 128 bytes spill slots size to estimate the maximum possible
640 // offset relative to the stack pointer.
641 // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
642 // real one for RISC-V.
643 int64_t MaxSPOffset = Offset + 128;
644 MaxSPOffset += MFI.getLocalFrameSize();
645 return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
646}
647
648// Determine whether a given base register plus offset immediate is
649// encodable to resolve a frame index.
651 Register BaseReg,
652 int64_t Offset) const {
653 unsigned FIOperandNum = 0;
654 while (!MI->getOperand(FIOperandNum).isFI()) {
655 FIOperandNum++;
656 assert(FIOperandNum < MI->getNumOperands() &&
657 "Instr does not have a FrameIndex operand!");
658 }
659
660 Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
661 return isInt<12>(Offset);
662}
663
664// Insert defining instruction(s) for a pointer to FrameIdx before
665// insertion point I.
666// Return materialized frame pointer.
668 int FrameIdx,
669 int64_t Offset) const {
671 DebugLoc DL;
672 if (MBBI != MBB->end())
673 DL = MBBI->getDebugLoc();
675 MachineRegisterInfo &MFI = MF->getRegInfo();
677
678 Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
679 BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
680 .addFrameIndex(FrameIdx)
681 .addImm(Offset);
682 return BaseReg;
683}
684
685// Resolve a frame index operand of an instruction to reference the
686// indicated base register plus offset instead.
688 int64_t Offset) const {
689 unsigned FIOperandNum = 0;
690 while (!MI.getOperand(FIOperandNum).isFI()) {
691 FIOperandNum++;
692 assert(FIOperandNum < MI.getNumOperands() &&
693 "Instr does not have a FrameIndex operand!");
694 }
695
696 Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
697 // FrameIndex Operands are always represented as a
698 // register followed by an immediate.
699 MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
700 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
701}
702
703// Get the offset from the referenced frame index in the instruction,
704// if there is one.
706 int Idx) const {
707 assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
708 RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
709 "The MI must be I or S format.");
710 assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
711 "FrameIndex operand");
712 return MI->getOperand(Idx + 1).getImm();
713}
714
716 const TargetFrameLowering *TFI = getFrameLowering(MF);
717 return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
718}
719
721 if (Reg == RISCV::SF_VCIX_STATE)
722 return "sf.vcix_state";
724}
725
726const uint32_t *
728 CallingConv::ID CC) const {
729 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
730
731 if (CC == CallingConv::GHC)
732 return CSR_NoRegs_RegMask;
733 switch (Subtarget.getTargetABI()) {
734 default:
735 llvm_unreachable("Unrecognized ABI");
738 return CSR_ILP32E_LP64E_RegMask;
742 return CSR_ILP32_LP64_V_RegMask;
743 return CSR_ILP32_LP64_RegMask;
747 return CSR_ILP32F_LP64F_V_RegMask;
748 return CSR_ILP32F_LP64F_RegMask;
752 return CSR_ILP32D_LP64D_V_RegMask;
753 return CSR_ILP32D_LP64D_RegMask;
754 }
755}
756
759 const MachineFunction &) const {
760 if (RC == &RISCV::VMV0RegClass)
761 return &RISCV::VRRegClass;
762 if (RC == &RISCV::VRNoV0RegClass)
763 return &RISCV::VRRegClass;
764 if (RC == &RISCV::VRM2NoV0RegClass)
765 return &RISCV::VRM2RegClass;
766 if (RC == &RISCV::VRM4NoV0RegClass)
767 return &RISCV::VRM4RegClass;
768 if (RC == &RISCV::VRM8NoV0RegClass)
769 return &RISCV::VRM8RegClass;
770 return RC;
771}
772
774 SmallVectorImpl<uint64_t> &Ops) const {
775 // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
776 // to represent one vector register. The dwarf offset is
777 // VLENB * scalable_offset / 8.
778 assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
779
780 // Add fixed-sized offset using existing DIExpression interface.
781 DIExpression::appendOffset(Ops, Offset.getFixed());
782
783 unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
784 int64_t VLENBSized = Offset.getScalable() / 8;
785 if (VLENBSized > 0) {
786 Ops.push_back(dwarf::DW_OP_constu);
787 Ops.push_back(VLENBSized);
788 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
789 Ops.push_back(dwarf::DW_OP_mul);
790 Ops.push_back(dwarf::DW_OP_plus);
791 } else if (VLENBSized < 0) {
792 Ops.push_back(dwarf::DW_OP_constu);
793 Ops.push_back(-VLENBSized);
794 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
795 Ops.push_back(dwarf::DW_OP_mul);
796 Ops.push_back(dwarf::DW_OP_minus);
797 }
798}
799
800unsigned
802 return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
804 ? 1
805 : 0;
806}
807
808// Add two address hints to improve chances of being able to use a compressed
809// instruction.
811 Register VirtReg, ArrayRef<MCPhysReg> Order,
813 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
814 const MachineRegisterInfo *MRI = &MF.getRegInfo();
815 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
816
817 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
818 VirtReg, Order, Hints, MF, VRM, Matrix);
819
820 if (!VRM || DisableRegAllocHints)
821 return BaseImplRetVal;
822
823 // Add any two address hints after any copy hints.
824 SmallSet<Register, 4> TwoAddrHints;
825
826 auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
827 bool NeedGPRC) -> void {
828 Register Reg = MO.getReg();
829 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
830 // TODO: Support GPRPair subregisters? Need to be careful with even/odd
831 // registers. If the virtual register is an odd register of a pair and the
832 // physical register is even (or vice versa), we should not add the hint.
833 if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
834 !MO.getSubReg() && !VRRegMO.getSubReg()) {
835 if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
836 TwoAddrHints.insert(PhysReg);
837 }
838 };
839
840 // This is all of the compressible binary instructions. If an instruction
841 // needs GPRC register class operands \p NeedGPRC will be set to true.
842 auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
843 NeedGPRC = false;
844 switch (MI.getOpcode()) {
845 default:
846 return false;
847 case RISCV::AND:
848 case RISCV::OR:
849 case RISCV::XOR:
850 case RISCV::SUB:
851 case RISCV::ADDW:
852 case RISCV::SUBW:
853 NeedGPRC = true;
854 return true;
855 case RISCV::ANDI: {
856 NeedGPRC = true;
857 if (!MI.getOperand(2).isImm())
858 return false;
859 int64_t Imm = MI.getOperand(2).getImm();
860 if (isInt<6>(Imm))
861 return true;
862 // c.zext.b
863 return Subtarget.hasStdExtZcb() && Imm == 255;
864 }
865 case RISCV::SRAI:
866 case RISCV::SRLI:
867 NeedGPRC = true;
868 return true;
869 case RISCV::ADD:
870 case RISCV::SLLI:
871 return true;
872 case RISCV::ADDI:
873 case RISCV::ADDIW:
874 return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
875 case RISCV::MUL:
876 case RISCV::SEXT_B:
877 case RISCV::SEXT_H:
878 case RISCV::ZEXT_H_RV32:
879 case RISCV::ZEXT_H_RV64:
880 // c.mul, c.sext.b, c.sext.h, c.zext.h
881 NeedGPRC = true;
882 return Subtarget.hasStdExtZcb();
883 case RISCV::ADD_UW:
884 // c.zext.w
885 NeedGPRC = true;
886 return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
887 MI.getOperand(2).getReg() == RISCV::X0;
888 case RISCV::XORI:
889 // c.not
890 NeedGPRC = true;
891 return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
892 MI.getOperand(2).getImm() == -1;
893 }
894 };
895
896 // Returns true if this operand is compressible. For non-registers it always
897 // returns true. Immediate range was already checked in isCompressible.
898 // For registers, it checks if the register is a GPRC register. reg-reg
899 // instructions that require GPRC need all register operands to be GPRC.
900 auto isCompressibleOpnd = [&](const MachineOperand &MO) {
901 if (!MO.isReg())
902 return true;
903 Register Reg = MO.getReg();
904 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
905 return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
906 };
907
908 for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
909 const MachineInstr &MI = *MO.getParent();
910 unsigned OpIdx = MO.getOperandNo();
911 bool NeedGPRC;
912 if (isCompressible(MI, NeedGPRC)) {
913 if (OpIdx == 0 && MI.getOperand(1).isReg()) {
914 if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
915 MI.getOpcode() == RISCV::ADD_UW ||
916 isCompressibleOpnd(MI.getOperand(2)))
917 tryAddHint(MO, MI.getOperand(1), NeedGPRC);
918 if (MI.isCommutable() && MI.getOperand(2).isReg() &&
919 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
920 tryAddHint(MO, MI.getOperand(2), NeedGPRC);
921 } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
922 isCompressibleOpnd(MI.getOperand(2)))) {
923 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
924 } else if (MI.isCommutable() && OpIdx == 2 &&
925 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
926 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
927 }
928 }
929 }
930
931 for (MCPhysReg OrderReg : Order)
932 if (TwoAddrHints.count(OrderReg))
933 Hints.push_back(OrderReg);
934
935 return BaseImplRetVal;
936}
// NOTE(review): trailing doxygen cross-reference residue (hover-tooltip
// dumps for symbols referenced above) removed — it was not part of the
// original translation unit.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasBP(const MachineFunction &MF) const
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVInstrInfo * getInstrInfo() const override
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:132
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:175
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:181
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:683
void push_back(const T &Elt)
Definition: SmallVector.h:413
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
int64_t getFixed() const
Returns the fixed component of the stack.
Definition: TypeSize.h:49
static StackOffset get(int64_t Fixed, int64_t Scalable)
Definition: TypeSize.h:44
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
Information about stack frame layout on the target.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual StringRef getRegAsmName(MCRegister Reg) const
Return the assembly name for Reg.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
virtual bool isRegisterReservedByUser(Register R) const
virtual const TargetInstrInfo * getInstrInfo() const
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:90
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ RISCV_VectorCall
Calling convention used for RISC-V V-extension.
Definition: CallingConv.h:268
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition: CallingConv.h:50
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
Definition: CallingConv.h:255
MCRegister getBPReg()
static unsigned getFormat(uint64_t TSFlags)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:443
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:340
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
unsigned getKillRegState(bool B)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1903
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const override
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
void lowerVRELOAD(MachineBasicBlock::iterator II) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
RISCVRegisterInfo(unsigned HwMode)
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
void lowerVSPILL(MachineBasicBlock::iterator II) const
Register getFrameRegister(const MachineFunction &MF) const override
void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, Register SrcReg, StackOffset Offset, MachineInstr::MIFlag Flag, MaybeAlign RequiredAlign) const
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
const uint32_t * getNoPreservedMask() const override
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
int64_t getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const override
unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override
StringRef getRegAsmName(MCRegister Reg) const override
bool eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override