//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);
static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
                           /*PC*/0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasStdExtD())
      return CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                  : CSR_Interrupt_SaveList;
  }

  bool HasVectorCSR =
      MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall &&
      Subtarget.hasVInstructions();

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_SaveList;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (HasVectorCSR)
      return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (HasVectorCSR)
      return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (HasVectorCSR)
      return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  // Mark any registers requested to be reserved as such.
  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X0); // zero
  markSuperRegs(Reserved, RISCV::X2); // sp
  markSuperRegs(Reserved, RISCV::X3); // gp
  markSuperRegs(Reserved, RISCV::X4); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // There are only 16 GPRs for RVE.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16; Reg <= RISCV::X31; Reg++)
      markSuperRegs(Reserved, Reg);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);
  markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state registers.
  markSuperRegs(Reserved, RISCV::VCIX_STATE);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.hasStdExtE())
      report_fatal_error("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23);
    markSuperRegs(Reserved, RISCV::X27);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply it by the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % 8 == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / 8) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / 8;
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
        .setMIFlag(Flag);

    if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
        (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
      unsigned Opc = NumOfVReg == 2 ? RISCV::SH1ADD :
                     (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg)
          .setMIFlag(Flag);
    } else {
      TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
          .addReg(SrcReg)
          .addReg(ScratchReg, RegState::Kill)
          .setMIFlag(Flag);
    }
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find
  // the largest 12-bit immediate that is aligned. Exclude -4096 since it can
  // be created with LUI.
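  // For example, with a required alignment of 16, MaxPosAdjStep is 2032 and an
  // offset of 4000 splits into "addi dest, src, 2032" followed by
  // "addi dest, dest, 1968", keeping the intermediate result 16-byte aligned.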
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use shNadd if doing so lets us materialize a 12-bit immediate with a
  // single instruction. This saves 1 instruction over the full lui/addi+add
  // fallback path. We avoid anything which can be done with a single lui as it
  // might be compressible. Note that the sh1add case is fully covered by the
  // 2x addi case just above and is thus omitted.
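  // For example, an offset of 16376 (2047 << 3) becomes "addi scratch, zero,
  // 2047; sh3add dest, scratch, src" (two instructions) rather than the
  // three-instruction lui/addi+add fallback.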
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    unsigned Opc = 0;
    if (isShiftedInt<12, 3>(Val)) {
      Opc = RISCV::SH3ADD;
      Val = Val >> 3;
    } else if (isShiftedInt<12, 2>(Val)) {
      Opc = RISCV::SH2ADD;
      Val = Val >> 2;
    }
    if (Opc) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .setMIFlag(Flag);
      return;
    }
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
// LMUL*VLENB bytes.
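// For example, PseudoVSPILL2_M2 (NF = 2, LMUL = 2) expands to roughly:
//   vs2r.v  vN,   (base)
//   add     base, base, 2*vlenb
//   vs2r.v  vN+2, (base)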
void RISCVRegisterInfo::lowerVSPILL(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VS1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VS2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VS4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register SrcReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    // Add an implicit use of the super-register to indicate that part of it
    // is being used; this prevents the machine verifier from complaining when
    // part of the subreg is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    BuildMI(MBB, II, DL, TII->get(Opcode))
        .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()))
        .addReg(SrcReg, RegState::Implicit);
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

// Split a VRELOADx_Mx pseudo into multiple whole register loads separated by
// LMUL*VLENB bytes.
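// For example, PseudoVRELOAD2_M2 (NF = 2, LMUL = 2) expands to roughly:
//   vl2re8.v  vN,   (base)
//   add       base, base, 2*vlenb
//   vl2re8.v  vN+2, (base)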
void RISCVRegisterInfo::lowerVRELOAD(MachineBasicBlock::iterator II) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
  unsigned Opcode, SubRegIdx;
  switch (LMUL) {
  default:
    llvm_unreachable("LMUL must be 1, 2, or 4.");
  case 1:
    Opcode = RISCV::VL1RE8_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    break;
  case 2:
    Opcode = RISCV::VL2RE8_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    break;
  case 4:
    Opcode = RISCV::VL4RE8_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    break;
  }
  static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                "Unexpected subreg numbering");
  static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                "Unexpected subreg numbering");

  Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // Optimize for constant VLEN.
  if (auto VLEN = STI.getRealVLen()) {
    const int64_t VLENB = *VLEN / 8;
    int64_t Offset = VLENB * LMUL;
    STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
  } else {
    BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
    uint32_t ShiftAmount = Log2_32(LMUL);
    if (ShiftAmount != 0)
      BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
          .addReg(VL)
          .addImm(ShiftAmount);
  }

  Register DestReg = II->getOperand(0).getReg();
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  for (unsigned I = 0; I < NF; ++I) {
    BuildMI(MBB, II, DL, TII->get(Opcode),
            TRI->getSubReg(DestReg, SubRegIdx + I))
        .addReg(Base, getKillRegState(I == NF - 1))
        .addMemOperand(*(II->memoperands_begin()));
    if (I != NF - 1)
      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(VL, getKillRegState(I == NF - 2));
    Base = NewBase;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (Offset.getScalable() && ST.getRealMinVLen() == ST.getRealMaxVLen()) {
    // For an exact VLEN value, scalable offsets become constant and thus
    // can be converted entirely into fixed offsets.
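    // For example, with VLEN known to be exactly 128 (VLENB = 16), a scalable
    // offset of 16 (two vector registers) folds into 2 * 16 = 32 fixed bytes.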
    int64_t FixedValue = Offset.getFixed();
    int64_t ScalableValue = Offset.getScalable();
    assert(ScalableValue % 8 == 0 &&
           "Scalable offset is not a multiple of a single vector size.");
    int64_t NumOfVReg = ScalableValue / 8;
    int64_t VLENB = ST.getRealMinVLen() / 8;
    Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
  }

  if (!isInt<32>(Offset.getFixed())) {
    report_fatal_error(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    int64_t Val = Offset.getFixed();
    int64_t Lo12 = SignExtend64<12>(Val);
    unsigned Opc = MI.getOpcode();
    if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some target may fuse the canonical
      // 32 bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
                Opc == RISCV::PREFETCH_W) &&
               (Lo12 & 0b11111) != 0) {
      // Prefetch instructions require the offset to be 32 byte aligned.
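      // That is, the low 5 bits of the immediate must be zero (2^5 == 32).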
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
                Opc == RISCV::PseudoRV32ZdinxSD) &&
               Lo12 >= 2044) {
      // This instruction will be split into 2 instructions. The second
      // instruction will add 4 to the immediate. If that would overflow 12
      // bits, we can't fold the offset.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset is,
      // by construction, at worst a LUI and an ADD.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val & ~(uint64_t)0xFFF,
                                Offset.getScalable());
    }
  }

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
                                                 /*IsImp*/false,
                                                 /*IsKill*/false);
  }

  // If, after materializing the adjustment, we have a pointless ADDI, remove
  // it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled. There are many
  // possible optimizations here, but given the extreme rarity of such spills,
  // we prefer simplicity of implementation for now.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerVSPILL(II);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerVRELOAD(II);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are loads/stores and ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
    // Estimate the stack size used to store callee-saved registers
    // (excluding reserved registers).
    unsigned CalleeSavedSize = 0;
    for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
         ++R) {
      if (Subtarget.isRegisterReservedByUser(Reg))
        continue;

      if (RISCV::GPRRegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
      else if (RISCV::FPR64RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
      else if (RISCV::FPR32RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
      // Ignore vector registers.
    }

    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
  }

  // Assume 128 bytes of spill slots to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick
  // a real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx at the start of
// MBB. Return the materialized frame base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MFI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_RegMask;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (CC == CallingConv::RISCV_VectorCall)
      return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
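  // For example, a scalable offset of +16 bytes (two vector registers) emits
  // DW_OP_constu 2, DW_OP_bregx vlenb 0, DW_OP_mul, DW_OP_plus.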

  // Add fixed-sized offset using existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() &&
                 !DisableCostPerUse
             ? 1
             : 0;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
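// For example, c.add overwrites its first source, so for "add a, b, c" we
// hint the allocator to give 'a' the same physical register as 'b' (or, by
// commutativity, 'c'), letting the 4-byte add shrink to a 2-byte c.add.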
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    // TODO: Support GPRPair subregisters? Need to be careful with even/odd
    // registers. If the virtual register is an odd register of a pair and the
    // physical register is even (or vice versa), we should not add the hint.
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands, \p NeedGPRC will be set to true.
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI: {
      NeedGPRC = true;
      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();
      if (isInt<6>(Imm))
        return true;
      // c.zext.b
      return Subtarget.hasStdExtZcb() && Imm == 255;
    }
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::MUL:
    case RISCV::SEXT_B:
    case RISCV::SEXT_H:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // c.mul, c.sext.b, c.sext.h, c.zext.h
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb();
    case RISCV::ADD_UW:
      // c.zext.w
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    case RISCV::XORI:
      // c.not
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}