LLVM 19.0.0git
RISCVRegisterInfo.cpp
Go to the documentation of this file.
1//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of the TargetRegisterInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVRegisterInfo.h"
14#include "RISCV.h"
16#include "RISCVSubtarget.h"
17#include "llvm/ADT/SmallSet.h"
27
28#define GET_REGINFO_TARGET_DESC
29#include "RISCVGenRegisterInfo.inc"
30
31using namespace llvm;
32
// Escape hatch for debugging/benchmarking the register allocator: when set,
// getRegAllocationHints() below skips adding its two-address / compressed-
// instruction hints and only the target-independent hints are used.
33static cl::opt<bool>
34 DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
35 cl::init(false),
36 cl::desc("Disable two address hints for register "
37 "allocation"));
38
// Compile-time guarantees that the TableGen-generated register enums for each
// register file (X, F in H/F/D widths, V) are numbered consecutively from
// their 0th register. Code in this file relies on this to address registers
// arithmetically, e.g. "RISCV::X0 + i" style iteration (see the RVE loop over
// X16..X31 in getReservedRegs) and "SubRegIdx + I" in lowerVSPILL/lowerVRELOAD.
39static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
40static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
41static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
42static_assert(RISCV::F31_H == RISCV::F0_H + 31,
43 "Register list not consecutive");
44static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
45static_assert(RISCV::F31_F == RISCV::F0_F + 31,
46 "Register list not consecutive");
47static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
48static_assert(RISCV::F31_D == RISCV::F0_D + 31,
49 "Register list not consecutive");
50static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
51static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
52
54 : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
55 /*PC*/0, HwMode) {}
56
57const MCPhysReg *
59 auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
61 return CSR_NoRegs_SaveList;
62 if (MF->getFunction().hasFnAttribute("interrupt")) {
63 if (Subtarget.hasStdExtD())
64 return CSR_XLEN_F64_Interrupt_SaveList;
65 if (Subtarget.hasStdExtF())
66 return Subtarget.isRVE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
67 : CSR_XLEN_F32_Interrupt_SaveList;
68 return Subtarget.isRVE() ? CSR_Interrupt_RVE_SaveList
69 : CSR_Interrupt_SaveList;
70 }
71
72 switch (Subtarget.getTargetABI()) {
73 default:
74 llvm_unreachable("Unrecognized ABI");
77 return CSR_ILP32E_LP64E_SaveList;
80 return CSR_ILP32_LP64_SaveList;
83 return CSR_ILP32F_LP64F_SaveList;
86 return CSR_ILP32D_LP64D_SaveList;
87 }
88}
89
91 const RISCVFrameLowering *TFI = getFrameLowering(MF);
92 BitVector Reserved(getNumRegs());
93 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
94
95 // Mark any registers requested to be reserved as such
96 for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
97 if (Subtarget.isRegisterReservedByUser(Reg))
98 markSuperRegs(Reserved, Reg);
99 }
100
101 // Use markSuperRegs to ensure any register aliases are also reserved
102 markSuperRegs(Reserved, RISCV::X0); // zero
103 markSuperRegs(Reserved, RISCV::X2); // sp
104 markSuperRegs(Reserved, RISCV::X3); // gp
105 markSuperRegs(Reserved, RISCV::X4); // tp
106 if (TFI->hasFP(MF))
107 markSuperRegs(Reserved, RISCV::X8); // fp
108 // Reserve the base register if we need to realign the stack and allocate
109 // variable-sized objects at runtime.
110 if (TFI->hasBP(MF))
111 markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp
112
113 // Additionally reserve dummy register used to form the register pair
114 // beginning with 'x0' for instructions that take register pairs.
115 markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);
116
117 // There are only 16 GPRs for RVE.
118 if (Subtarget.isRVE())
119 for (MCPhysReg Reg = RISCV::X16; Reg <= RISCV::X31; Reg++)
120 markSuperRegs(Reserved, Reg);
121
122 // V registers for code generation. We handle them manually.
123 markSuperRegs(Reserved, RISCV::VL);
124 markSuperRegs(Reserved, RISCV::VTYPE);
125 markSuperRegs(Reserved, RISCV::VXSAT);
126 markSuperRegs(Reserved, RISCV::VXRM);
127 markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)
128
129 // Floating point environment registers.
130 markSuperRegs(Reserved, RISCV::FRM);
131 markSuperRegs(Reserved, RISCV::FFLAGS);
132
133 // SiFive VCIX state registers.
134 markSuperRegs(Reserved, RISCV::VCIX_STATE);
135
137 if (Subtarget.isRVE())
138 report_fatal_error("Graal reserved registers do not exist in RVE");
139 markSuperRegs(Reserved, RISCV::X23);
140 markSuperRegs(Reserved, RISCV::X27);
141 }
142
143 // Shadow stack pointer.
144 markSuperRegs(Reserved, RISCV::SSP);
145
146 assert(checkAllSuperRegsMarked(Reserved));
147 return Reserved;
148}
149
151 MCRegister PhysReg) const {
152 return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
153}
154
156 return CSR_NoRegs_RegMask;
157}
158
161 const DebugLoc &DL, Register DestReg,
164 MaybeAlign RequiredAlign) const {
165
166 if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
167 return;
168
172 const RISCVInstrInfo *TII = ST.getInstrInfo();
173
174 bool KillSrcReg = false;
175
176 if (Offset.getScalable()) {
177 unsigned ScalableAdjOpc = RISCV::ADD;
178 int64_t ScalableValue = Offset.getScalable();
179 if (ScalableValue < 0) {
180 ScalableValue = -ScalableValue;
181 ScalableAdjOpc = RISCV::SUB;
182 }
183 // Get vlenb and multiply vlen with the number of vector registers.
184 Register ScratchReg = DestReg;
185 if (DestReg == SrcReg)
186 ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
187 TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue, Flag);
188 BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
189 .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
190 .setMIFlag(Flag);
191 SrcReg = DestReg;
192 KillSrcReg = true;
193 }
194
195 int64_t Val = Offset.getFixed();
196 if (DestReg == SrcReg && Val == 0)
197 return;
198
199 const uint64_t Align = RequiredAlign.valueOrOne().value();
200
201 if (isInt<12>(Val)) {
202 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
203 .addReg(SrcReg, getKillRegState(KillSrcReg))
204 .addImm(Val)
205 .setMIFlag(Flag);
206 return;
207 }
208
209 // Try to split the offset across two ADDIs. We need to keep the intermediate
210 // result aligned after each ADDI. We need to determine the maximum value we
211 // can put in each ADDI. In the negative direction, we can use -2048 which is
212 // always sufficiently aligned. In the positive direction, we need to find the
213 // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
214 // created with LUI.
215 assert(Align < 2048 && "Required alignment too large");
216 int64_t MaxPosAdjStep = 2048 - Align;
217 if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
218 int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
219 Val -= FirstAdj;
220 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
221 .addReg(SrcReg, getKillRegState(KillSrcReg))
222 .addImm(FirstAdj)
223 .setMIFlag(Flag);
224 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
225 .addReg(DestReg, RegState::Kill)
226 .addImm(Val)
227 .setMIFlag(Flag);
228 return;
229 }
230
231 unsigned Opc = RISCV::ADD;
232 if (Val < 0) {
233 Val = -Val;
234 Opc = RISCV::SUB;
235 }
236
237 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
238 TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
239 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
240 .addReg(SrcReg, getKillRegState(KillSrcReg))
241 .addReg(ScratchReg, RegState::Kill)
242 .setMIFlag(Flag);
243}
244
245// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
246// LMUL*VLENB bytes.
248 DebugLoc DL = II->getDebugLoc();
249 MachineBasicBlock &MBB = *II->getParent();
252 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
253 const TargetInstrInfo *TII = STI.getInstrInfo();
255
256 auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
257 unsigned NF = ZvlssegInfo->first;
258 unsigned LMUL = ZvlssegInfo->second;
259 assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
260 unsigned Opcode, SubRegIdx;
261 switch (LMUL) {
262 default:
263 llvm_unreachable("LMUL must be 1, 2, or 4.");
264 case 1:
265 Opcode = RISCV::VS1R_V;
266 SubRegIdx = RISCV::sub_vrm1_0;
267 break;
268 case 2:
269 Opcode = RISCV::VS2R_V;
270 SubRegIdx = RISCV::sub_vrm2_0;
271 break;
272 case 4:
273 Opcode = RISCV::VS4R_V;
274 SubRegIdx = RISCV::sub_vrm4_0;
275 break;
276 }
277 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
278 "Unexpected subreg numbering");
279 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
280 "Unexpected subreg numbering");
281 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
282 "Unexpected subreg numbering");
283
284 Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
285 // Optimize for constant VLEN.
286 if (auto VLEN = STI.getRealVLen()) {
287 const int64_t VLENB = *VLEN / 8;
288 int64_t Offset = VLENB * LMUL;
289 STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
290 } else {
291 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
292 uint32_t ShiftAmount = Log2_32(LMUL);
293 if (ShiftAmount != 0)
294 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
295 .addReg(VL)
296 .addImm(ShiftAmount);
297 }
298
299 Register SrcReg = II->getOperand(0).getReg();
300 Register Base = II->getOperand(1).getReg();
301 bool IsBaseKill = II->getOperand(1).isKill();
302 Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
303 for (unsigned I = 0; I < NF; ++I) {
304 // Adding implicit-use of super register to describe we are using part of
305 // super register, that prevents machine verifier complaining when part of
306 // subreg is undef, see comment in MachineVerifier::checkLiveness for more
307 // detail.
308 BuildMI(MBB, II, DL, TII->get(Opcode))
309 .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
310 .addReg(Base, getKillRegState(I == NF - 1))
311 .addMemOperand(*(II->memoperands_begin()))
312 .addReg(SrcReg, RegState::Implicit);
313 if (I != NF - 1)
314 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
315 .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
316 .addReg(VL, getKillRegState(I == NF - 2));
317 Base = NewBase;
318 }
319 II->eraseFromParent();
320}
321
322// Split a VSPILLx_Mx pseudo into multiple whole register loads separated by
323// LMUL*VLENB bytes.
325 DebugLoc DL = II->getDebugLoc();
326 MachineBasicBlock &MBB = *II->getParent();
329 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
330 const TargetInstrInfo *TII = STI.getInstrInfo();
332
333 auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
334 unsigned NF = ZvlssegInfo->first;
335 unsigned LMUL = ZvlssegInfo->second;
336 assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
337 unsigned Opcode, SubRegIdx;
338 switch (LMUL) {
339 default:
340 llvm_unreachable("LMUL must be 1, 2, or 4.");
341 case 1:
342 Opcode = RISCV::VL1RE8_V;
343 SubRegIdx = RISCV::sub_vrm1_0;
344 break;
345 case 2:
346 Opcode = RISCV::VL2RE8_V;
347 SubRegIdx = RISCV::sub_vrm2_0;
348 break;
349 case 4:
350 Opcode = RISCV::VL4RE8_V;
351 SubRegIdx = RISCV::sub_vrm4_0;
352 break;
353 }
354 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
355 "Unexpected subreg numbering");
356 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
357 "Unexpected subreg numbering");
358 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
359 "Unexpected subreg numbering");
360
361 Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
362 // Optimize for constant VLEN.
363 if (auto VLEN = STI.getRealVLen()) {
364 const int64_t VLENB = *VLEN / 8;
365 int64_t Offset = VLENB * LMUL;
366 STI.getInstrInfo()->movImm(MBB, II, DL, VL, Offset);
367 } else {
368 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
369 uint32_t ShiftAmount = Log2_32(LMUL);
370 if (ShiftAmount != 0)
371 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
372 .addReg(VL)
373 .addImm(ShiftAmount);
374 }
375
376 Register DestReg = II->getOperand(0).getReg();
377 Register Base = II->getOperand(1).getReg();
378 bool IsBaseKill = II->getOperand(1).isKill();
379 Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
380 for (unsigned I = 0; I < NF; ++I) {
381 BuildMI(MBB, II, DL, TII->get(Opcode),
382 TRI->getSubReg(DestReg, SubRegIdx + I))
383 .addReg(Base, getKillRegState(I == NF - 1))
384 .addMemOperand(*(II->memoperands_begin()));
385 if (I != NF - 1)
386 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
387 .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
388 .addReg(VL, getKillRegState(I == NF - 2));
389 Base = NewBase;
390 }
391 II->eraseFromParent();
392}
393
395 int SPAdj, unsigned FIOperandNum,
396 RegScavenger *RS) const {
397 assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
398
399 MachineInstr &MI = *II;
400 MachineFunction &MF = *MI.getParent()->getParent();
403 DebugLoc DL = MI.getDebugLoc();
404
405 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
406 Register FrameReg;
408 getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
409 bool IsRVVSpill = RISCV::isRVVSpill(MI);
410 if (!IsRVVSpill)
411 Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
412
413 if (Offset.getScalable() &&
414 ST.getRealMinVLen() == ST.getRealMaxVLen()) {
415 // For an exact VLEN value, scalable offsets become constant and thus
416 // can be converted entirely into fixed offsets.
417 int64_t FixedValue = Offset.getFixed();
418 int64_t ScalableValue = Offset.getScalable();
419 assert(ScalableValue % 8 == 0 &&
420 "Scalable offset is not a multiple of a single vector size.");
421 int64_t NumOfVReg = ScalableValue / 8;
422 int64_t VLENB = ST.getRealMinVLen() / 8;
423 Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
424 }
425
426 if (!isInt<32>(Offset.getFixed())) {
428 "Frame offsets outside of the signed 32-bit range not supported");
429 }
430
431 if (!IsRVVSpill) {
432 if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
433 // We chose to emit the canonical immediate sequence rather than folding
434 // the offset into the using add under the theory that doing so doesn't
435 // save dynamic instruction count and some target may fuse the canonical
436 // 32 bit immediate sequence. We still need to clear the portion of the
437 // offset encoded in the immediate.
438 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
439 } else {
440 // We can encode an add with 12 bit signed immediate in the immediate
441 // operand of our user instruction. As a result, the remaining
442 // offset can by construction, at worst, a LUI and a ADD.
443 int64_t Val = Offset.getFixed();
444 int64_t Lo12 = SignExtend64<12>(Val);
445 if ((MI.getOpcode() == RISCV::PREFETCH_I ||
446 MI.getOpcode() == RISCV::PREFETCH_R ||
447 MI.getOpcode() == RISCV::PREFETCH_W) &&
448 (Lo12 & 0b11111) != 0)
449 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
450 else {
451 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
453 Offset.getScalable());
454 }
455 }
456 }
457
458 if (Offset.getScalable() || Offset.getFixed()) {
459 Register DestReg;
460 if (MI.getOpcode() == RISCV::ADDI)
461 DestReg = MI.getOperand(0).getReg();
462 else
463 DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
464 adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
465 MachineInstr::NoFlags, std::nullopt);
466 MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
467 /*IsImp*/false,
468 /*IsKill*/true);
469 } else {
470 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
471 /*IsImp*/false,
472 /*IsKill*/false);
473 }
474
475 // If after materializing the adjustment, we have a pointless ADDI, remove it
476 if (MI.getOpcode() == RISCV::ADDI &&
477 MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
478 MI.getOperand(2).getImm() == 0) {
479 MI.eraseFromParent();
480 return true;
481 }
482
483 // Handle spill/fill of synthetic register classes for segment operations to
484 // ensure correctness in the edge case one gets spilled. There are many
485 // possible optimizations here, but given the extreme rarity of such spills,
486 // we prefer simplicity of implementation for now.
487 switch (MI.getOpcode()) {
488 case RISCV::PseudoVSPILL2_M1:
489 case RISCV::PseudoVSPILL2_M2:
490 case RISCV::PseudoVSPILL2_M4:
491 case RISCV::PseudoVSPILL3_M1:
492 case RISCV::PseudoVSPILL3_M2:
493 case RISCV::PseudoVSPILL4_M1:
494 case RISCV::PseudoVSPILL4_M2:
495 case RISCV::PseudoVSPILL5_M1:
496 case RISCV::PseudoVSPILL6_M1:
497 case RISCV::PseudoVSPILL7_M1:
498 case RISCV::PseudoVSPILL8_M1:
499 lowerVSPILL(II);
500 return true;
501 case RISCV::PseudoVRELOAD2_M1:
502 case RISCV::PseudoVRELOAD2_M2:
503 case RISCV::PseudoVRELOAD2_M4:
504 case RISCV::PseudoVRELOAD3_M1:
505 case RISCV::PseudoVRELOAD3_M2:
506 case RISCV::PseudoVRELOAD4_M1:
507 case RISCV::PseudoVRELOAD4_M2:
508 case RISCV::PseudoVRELOAD5_M1:
509 case RISCV::PseudoVRELOAD6_M1:
510 case RISCV::PseudoVRELOAD7_M1:
511 case RISCV::PseudoVRELOAD8_M1:
512 lowerVRELOAD(II);
513 return true;
514 }
515
516 return false;
517}
518
520 const MachineFunction &MF) const {
521 return true;
522}
523
524// Returns true if the instruction's frame index reference would be better
525// served by a base register other than FP or SP.
526// Used by LocalStackSlotAllocation pass to determine which frame index
527// references it should create new base registers for.
529 int64_t Offset) const {
530 unsigned FIOperandNum = 0;
531 for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
532 assert(FIOperandNum < MI->getNumOperands() &&
533 "Instr doesn't have FrameIndex operand");
534
535 // For RISC-V, The machine instructions that include a FrameIndex operand
536 // are load/store, ADDI instructions.
537 unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
538 if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
539 return false;
540 // We only generate virtual base registers for loads and stores, so
541 // return false for everything else.
542 if (!MI->mayLoad() && !MI->mayStore())
543 return false;
544
545 const MachineFunction &MF = *MI->getMF();
546 const MachineFrameInfo &MFI = MF.getFrameInfo();
547 const RISCVFrameLowering *TFI = getFrameLowering(MF);
548 const MachineRegisterInfo &MRI = MF.getRegInfo();
549 unsigned CalleeSavedSize = 0;
550 Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
551
552 // Estimate the stack size used to store callee saved registers(
553 // excludes reserved registers).
554 BitVector ReservedRegs = getReservedRegs(MF);
555 for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
556 if (!ReservedRegs.test(Reg))
557 CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
558 }
559
560 int64_t MaxFPOffset = Offset - CalleeSavedSize;
561 if (TFI->hasFP(MF) && !shouldRealignStack(MF))
562 return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
563
564 // Assume 128 bytes spill slots size to estimate the maximum possible
565 // offset relative to the stack pointer.
566 // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
567 // real one for RISC-V.
568 int64_t MaxSPOffset = Offset + 128;
569 MaxSPOffset += MFI.getLocalFrameSize();
570 return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
571}
572
573// Determine whether a given base register plus offset immediate is
574// encodable to resolve a frame index.
576 Register BaseReg,
577 int64_t Offset) const {
578 unsigned FIOperandNum = 0;
579 while (!MI->getOperand(FIOperandNum).isFI()) {
580 FIOperandNum++;
581 assert(FIOperandNum < MI->getNumOperands() &&
582 "Instr does not have a FrameIndex operand!");
583 }
584
585 Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
586 return isInt<12>(Offset);
587}
588
589// Insert defining instruction(s) for a pointer to FrameIdx before
590// insertion point I.
591// Return materialized frame pointer.
593 int FrameIdx,
594 int64_t Offset) const {
596 DebugLoc DL;
597 if (MBBI != MBB->end())
598 DL = MBBI->getDebugLoc();
600 MachineRegisterInfo &MFI = MF->getRegInfo();
602
603 Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
604 BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
605 .addFrameIndex(FrameIdx)
606 .addImm(Offset);
607 return BaseReg;
608}
609
610// Resolve a frame index operand of an instruction to reference the
611// indicated base register plus offset instead.
613 int64_t Offset) const {
614 unsigned FIOperandNum = 0;
615 while (!MI.getOperand(FIOperandNum).isFI()) {
616 FIOperandNum++;
617 assert(FIOperandNum < MI.getNumOperands() &&
618 "Instr does not have a FrameIndex operand!");
619 }
620
621 Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
622 // FrameIndex Operands are always represented as a
623 // register followed by an immediate.
624 MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
625 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
626}
627
628// Get the offset from the referenced frame index in the instruction,
629// if there is one.
631 int Idx) const {
632 assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
633 RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
634 "The MI must be I or S format.");
635 assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
636 "FrameIndex operand");
637 return MI->getOperand(Idx + 1).getImm();
638}
639
641 const TargetFrameLowering *TFI = getFrameLowering(MF);
642 return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
643}
644
645const uint32_t *
647 CallingConv::ID CC) const {
648 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
649
650 if (CC == CallingConv::GHC)
651 return CSR_NoRegs_RegMask;
652 switch (Subtarget.getTargetABI()) {
653 default:
654 llvm_unreachable("Unrecognized ABI");
657 return CSR_ILP32E_LP64E_RegMask;
660 return CSR_ILP32_LP64_RegMask;
663 return CSR_ILP32F_LP64F_RegMask;
666 return CSR_ILP32D_LP64D_RegMask;
667 }
668}
669
672 const MachineFunction &) const {
673 if (RC == &RISCV::VMV0RegClass)
674 return &RISCV::VRRegClass;
675 if (RC == &RISCV::VRNoV0RegClass)
676 return &RISCV::VRRegClass;
677 if (RC == &RISCV::VRM2NoV0RegClass)
678 return &RISCV::VRM2RegClass;
679 if (RC == &RISCV::VRM4NoV0RegClass)
680 return &RISCV::VRM4RegClass;
681 if (RC == &RISCV::VRM8NoV0RegClass)
682 return &RISCV::VRM8RegClass;
683 return RC;
684}
685
687 SmallVectorImpl<uint64_t> &Ops) const {
688 // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
689 // to represent one vector register. The dwarf offset is
690 // VLENB * scalable_offset / 8.
691 assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
692
693 // Add fixed-sized offset using existing DIExpression interface.
694 DIExpression::appendOffset(Ops, Offset.getFixed());
695
696 unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
697 int64_t VLENBSized = Offset.getScalable() / 8;
698 if (VLENBSized > 0) {
699 Ops.push_back(dwarf::DW_OP_constu);
700 Ops.push_back(VLENBSized);
701 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
702 Ops.push_back(dwarf::DW_OP_mul);
703 Ops.push_back(dwarf::DW_OP_plus);
704 } else if (VLENBSized < 0) {
705 Ops.push_back(dwarf::DW_OP_constu);
706 Ops.push_back(-VLENBSized);
707 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
708 Ops.push_back(dwarf::DW_OP_mul);
709 Ops.push_back(dwarf::DW_OP_minus);
710 }
711}
712
713unsigned
715 return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
716}
717
718// Add two address hints to improve chances of being able to use a compressed
719// instruction.
721 Register VirtReg, ArrayRef<MCPhysReg> Order,
723 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
724 const MachineRegisterInfo *MRI = &MF.getRegInfo();
725 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
726
727 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
728 VirtReg, Order, Hints, MF, VRM, Matrix);
729
730 if (!VRM || DisableRegAllocHints)
731 return BaseImplRetVal;
732
733 // Add any two address hints after any copy hints.
734 SmallSet<Register, 4> TwoAddrHints;
735
736 auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
737 bool NeedGPRC) -> void {
738 Register Reg = MO.getReg();
739 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
740 if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
741 assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
742 if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
743 TwoAddrHints.insert(PhysReg);
744 }
745 };
746
747 // This is all of the compressible binary instructions. If an instruction
748 // needs GPRC register class operands \p NeedGPRC will be set to true.
749 auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
750 NeedGPRC = false;
751 switch (MI.getOpcode()) {
752 default:
753 return false;
754 case RISCV::AND:
755 case RISCV::OR:
756 case RISCV::XOR:
757 case RISCV::SUB:
758 case RISCV::ADDW:
759 case RISCV::SUBW:
760 NeedGPRC = true;
761 return true;
762 case RISCV::ANDI: {
763 NeedGPRC = true;
764 if (!MI.getOperand(2).isImm())
765 return false;
766 int64_t Imm = MI.getOperand(2).getImm();
767 if (isInt<6>(Imm))
768 return true;
769 // c.zext.b
770 return Subtarget.hasStdExtZcb() && Imm == 255;
771 }
772 case RISCV::SRAI:
773 case RISCV::SRLI:
774 NeedGPRC = true;
775 return true;
776 case RISCV::ADD:
777 case RISCV::SLLI:
778 return true;
779 case RISCV::ADDI:
780 case RISCV::ADDIW:
781 return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
782 case RISCV::MUL:
783 case RISCV::SEXT_B:
784 case RISCV::SEXT_H:
785 case RISCV::ZEXT_H_RV32:
786 case RISCV::ZEXT_H_RV64:
787 // c.mul, c.sext.b, c.sext.h, c.zext.h
788 NeedGPRC = true;
789 return Subtarget.hasStdExtZcb();
790 case RISCV::ADD_UW:
791 // c.zext.w
792 NeedGPRC = true;
793 return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
794 MI.getOperand(2).getReg() == RISCV::X0;
795 case RISCV::XORI:
796 // c.not
797 NeedGPRC = true;
798 return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
799 MI.getOperand(2).getImm() == -1;
800 }
801 };
802
803 // Returns true if this operand is compressible. For non-registers it always
804 // returns true. Immediate range was already checked in isCompressible.
805 // For registers, it checks if the register is a GPRC register. reg-reg
806 // instructions that require GPRC need all register operands to be GPRC.
807 auto isCompressibleOpnd = [&](const MachineOperand &MO) {
808 if (!MO.isReg())
809 return true;
810 Register Reg = MO.getReg();
811 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
812 return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
813 };
814
815 for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
816 const MachineInstr &MI = *MO.getParent();
817 unsigned OpIdx = MO.getOperandNo();
818 bool NeedGPRC;
819 if (isCompressible(MI, NeedGPRC)) {
820 if (OpIdx == 0 && MI.getOperand(1).isReg()) {
821 if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
822 MI.getOpcode() == RISCV::ADD_UW ||
823 isCompressibleOpnd(MI.getOperand(2)))
824 tryAddHint(MO, MI.getOperand(1), NeedGPRC);
825 if (MI.isCommutable() && MI.getOperand(2).isReg() &&
826 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
827 tryAddHint(MO, MI.getOperand(2), NeedGPRC);
828 } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
829 isCompressibleOpnd(MI.getOperand(2)))) {
830 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
831 } else if (MI.isCommutable() && OpIdx == 2 &&
832 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
833 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
834 }
835 }
836 }
837
838 for (MCPhysReg OrderReg : Order)
839 if (TwoAddrHints.count(OrderReg))
840 Hints.push_back(OrderReg);
841
842 return BaseImplRetVal;
843}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Live Register Matrix
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
static cl::opt< bool > DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden, cl::init(false), cl::desc("Disable two address hints for register " "allocation"))
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallSet class.
static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
Definition: StackMaps.cpp:195
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool test(unsigned Idx) const
Definition: BitVector.h:461
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
A debug info location.
Definition: DebugLoc.h:33
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:262
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:669
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int64_t getLocalFrameSize() const
Get the size of the local object blob.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
Definition: MachineInstr.h:69
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasBP(const MachineFunction &MF) const
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
std::optional< unsigned > getRealVLen() const
const RISCVRegisterInfo * getRegisterInfo() const override
const RISCVInstrInfo * getInstrInfo() const override
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N). In this case, the set can be maintained with no mallocs. If the set gets large, we expand to using an std::set to maintain reasonable lookup times.
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
Definition: SmallVector.h:586
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void push_back(const T &Elt)
Definition: SmallVector.h:426
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:33
int64_t getFixed() const
Returns the fixed component of the stack.
Definition: TypeSize.h:49
static StackOffset get(int64_t Fixed, int64_t Scalable)
Definition: TypeSize.h:44
Information about stack frame layout on the target.
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physical register for the virtual register VirtReg.
virtual const TargetInstrInfo * getInstrInfo() const
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:105
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition: CallingConv.h:50
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
Definition: CallingConv.h:255
MCRegister getBPReg()
static unsigned getFormat(uint64_t TSFlags)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isRVVSpill(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:313
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
unsigned getKillRegState(bool B)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1888
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const override
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
void lowerVRELOAD(MachineBasicBlock::iterator II) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
RISCVRegisterInfo(unsigned HwMode)
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
void lowerVSPILL(MachineBasicBlock::iterator II) const
Register getFrameRegister(const MachineFunction &MF) const override
void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, Register SrcReg, StackOffset Offset, MachineInstr::MIFlag Flag, MaybeAlign RequiredAlign) const
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
const uint32_t * getNoPreservedMask() const override
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
int64_t getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const override
unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override
bool eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override