LLVM 17.0.0git
RISCVRegisterInfo.cpp
Go to the documentation of this file.
1//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the RISC-V implementation of the TargetRegisterInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "RISCVRegisterInfo.h"
14#include "RISCV.h"
16#include "RISCVSubtarget.h"
26
27#define GET_REGINFO_TARGET_DESC
28#include "RISCVGenRegisterInfo.inc"
29
30using namespace llvm;
31
// Escape hatch: disables the compression-oriented two-address register
// allocation hints added by getRegAllocationHints (see end of file).
32static cl::opt<bool>
33 DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
34 cl::init(false),
35 cl::desc("Disable two address hints for register "
36 "allocation"));
37
// Code in this file does arithmetic over register enum values (e.g.
// RISCV::X0 + i, SubRegIdx + I in the VSPILL/VRELOAD lowering). These
// compile-time checks guarantee the TableGen-assigned numbering is
// consecutive for each register file, so that arithmetic is valid.
38static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
39static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
40static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
41static_assert(RISCV::F31_H == RISCV::F0_H + 31,
42 "Register list not consecutive");
43static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
44static_assert(RISCV::F31_F == RISCV::F0_F + 31,
45 "Register list not consecutive");
46static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
47static_assert(RISCV::F31_D == RISCV::F0_D + 31,
48 "Register list not consecutive");
49static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
50static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");
51
// Constructor body (signature line is elided in this extract; per the
// declaration it is RISCVRegisterInfo(unsigned HwMode)). RISCV::X1 (ra)
// is passed to the generated base class as the return-address register;
// Dwarf/EH flavours and PC are all 0.
53 : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/0, /*EHFlavor*/0,
54 /*PC*/0, HwMode) {}
55
// Returns the callee-saved register list for MF. Checked in priority order:
// an early-out returning the empty list (the guard condition on the elided
// line is not visible here -- presumably the GHC calling convention, per the
// matching check in getCallPreservedMask; verify), then "interrupt" handlers,
// which must save FP state matching the widest enabled FP extension, then
// the normal per-ABI lists.
56const MCPhysReg *
58 auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
60 return CSR_NoRegs_SaveList;
61 if (MF->getFunction().hasFnAttribute("interrupt")) {
// Order matters: D implies F, so test the wider extension first.
62 if (Subtarget.hasStdExtD())
63 return CSR_XLEN_F64_Interrupt_SaveList;
64 if (Subtarget.hasStdExtF())
65 return CSR_XLEN_F32_Interrupt_SaveList;
66 return CSR_Interrupt_SaveList;
67 }
68
// NOTE(review): the ABI case labels between the returns below are elided
// in this extract; each return pairs with its soft/single/double-float
// ABI variants judging by the list names.
69 switch (Subtarget.getTargetABI()) {
70 default:
71 llvm_unreachable("Unrecognized ABI");
74 return CSR_ILP32_LP64_SaveList;
77 return CSR_ILP32F_LP64F_SaveList;
80 return CSR_ILP32D_LP64D_SaveList;
81 }
82}
83
// Builds the BitVector of registers the allocator must never use:
// user-requested reservations, fixed-role GPRs (zero/sp/gp/tp, plus fp/bp
// when the frame needs them), vector CSRs managed explicitly by codegen,
// and the FP environment registers.
85 const RISCVFrameLowering *TFI = getFrameLowering(MF);
86 BitVector Reserved(getNumRegs());
87
88 // Mark any registers requested to be reserved as such
// NOTE(review): the per-register guard inside this loop is elided in this
// extract -- presumably a check of isRegisterReservedByUser(Reg); verify.
89 for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
91 markSuperRegs(Reserved, Reg);
92 }
93
94 // Use markSuperRegs to ensure any register aliases are also reserved
95 markSuperRegs(Reserved, RISCV::X0); // zero
96 markSuperRegs(Reserved, RISCV::X2); // sp
97 markSuperRegs(Reserved, RISCV::X3); // gp
98 markSuperRegs(Reserved, RISCV::X4); // tp
99 if (TFI->hasFP(MF))
100 markSuperRegs(Reserved, RISCV::X8); // fp
101 // Reserve the base register if we need to realign the stack and allocate
102 // variable-sized objects at runtime.
103 if (TFI->hasBP(MF))
104 markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp
105
106 // V registers for code generation. We handle them manually.
107 markSuperRegs(Reserved, RISCV::VL);
108 markSuperRegs(Reserved, RISCV::VTYPE);
109 markSuperRegs(Reserved, RISCV::VXSAT);
110 markSuperRegs(Reserved, RISCV::VXRM);
111 markSuperRegs(Reserved, RISCV::VLENB); // vlenb (constant)
112
113 // Floating point environment registers.
114 markSuperRegs(Reserved, RISCV::FRM);
115 markSuperRegs(Reserved, RISCV::FFLAGS);
116
// Sanity check: every alias of every reserved register is also reserved.
117 assert(checkAllSuperRegsMarked(Reserved));
118 return Reserved;
119}
120
// Inline asm may clobber any register except those the user reserved
// (e.g. via -ffixed-<reg>).
122 MCRegister PhysReg) const {
123 return !MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(PhysReg);
124}
125
// Register mask preserving nothing across a call.
127 return CSR_NoRegs_RegMask;
128}
129
130// Frame indexes representing locations of CSRs which are given a fixed location
131// by save/restore libcalls.
// The map covers exactly the registers the libcalls store: ra plus the
// saved registers s0-s11. Frame indices are negative (fixed objects) and
// decrease in the libcall's layout order.
132static const std::pair<unsigned, int> FixedCSRFIMap[] = {
133 {/*ra*/ RISCV::X1, -1},
134 {/*s0*/ RISCV::X8, -2},
135 {/*s1*/ RISCV::X9, -3},
136 {/*s2*/ RISCV::X18, -4},
137 {/*s3*/ RISCV::X19, -5},
138 {/*s4*/ RISCV::X20, -6},
139 {/*s5*/ RISCV::X21, -7},
140 {/*s6*/ RISCV::X22, -8},
141 {/*s7*/ RISCV::X23, -9},
142 {/*s8*/ RISCV::X24, -10},
143 {/*s9*/ RISCV::X25, -11},
144 {/*s10*/ RISCV::X26, -12},
145 {/*s11*/ RISCV::X27, -13}
146};
147
// If this function uses the save/restore libcalls, CSRs listed in
// FixedCSRFIMap live at fixed frame indices; report that index for Reg.
// Returns false (and leaves FrameIdx untouched) when libcalls are not in
// use or Reg has no fixed slot.
149 Register Reg,
150 int &FrameIdx) const {
151 const auto *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
152 if (!RVFI->useSaveRestoreLibCalls(MF))
153 return false;
154
155 const auto *FII =
156 llvm::find_if(FixedCSRFIMap, [&](auto P) { return P.first == Reg; });
157 if (FII == std::end(FixedCSRFIMap))
158 return false;
159
160 FrameIdx = FII->second;
161 return true;
162}
163
// Emits code computing DestReg = SrcReg + Offset, where Offset may have both
// a fixed and a scalable (vector-length-multiplied) component. The scalable
// part is materialized first via vlenb; the fixed part is then added with
// the cheapest available sequence: single ADDI, a pair of ADDIs (respecting
// RequiredAlign on the intermediate value), or movImm + ADD/SUB.
166 const DebugLoc &DL, Register DestReg,
169 MaybeAlign RequiredAlign) const {
170
// Nothing to do for a no-op copy with zero offset.
171 if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
172 return;
173
177 const RISCVInstrInfo *TII = ST.getInstrInfo();
178
179 bool KillSrcReg = false;
180
181 if (Offset.getScalable()) {
182 unsigned ScalableAdjOpc = RISCV::ADD;
183 int64_t ScalableValue = Offset.getScalable();
184 if (ScalableValue < 0) {
185 ScalableValue = -ScalableValue;
186 ScalableAdjOpc = RISCV::SUB;
187 }
188 // Get vlenb and multiply vlen with the number of vector registers.
// When DestReg aliases SrcReg we must not clobber the source before the
// final add, so compute the scaled amount into a fresh scratch register.
189 Register ScratchReg = DestReg;
190 if (DestReg == SrcReg)
191 ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
192 TII->getVLENFactoredAmount(MF, MBB, II, DL, ScratchReg, ScalableValue, Flag);
193 BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
194 .addReg(SrcReg).addReg(ScratchReg, RegState::Kill)
195 .setMIFlag(Flag);
// The fixed part below continues from DestReg, which now holds
// SrcReg + scalable part.
196 SrcReg = DestReg;
197 KillSrcReg = true;
198 }
199
200 int64_t Val = Offset.getFixed();
201 if (DestReg == SrcReg && Val == 0)
202 return;
203
204 const uint64_t Align = RequiredAlign.valueOrOne().value();
205
// Fast path: the whole fixed offset fits a 12-bit signed immediate.
206 if (isInt<12>(Val)) {
207 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
208 .addReg(SrcReg, getKillRegState(KillSrcReg))
209 .addImm(Val)
210 .setMIFlag(Flag);
211 return;
212 }
213
214 // Try to split the offset across two ADDIs. We need to keep the intermediate
215 // result aligned after each ADDI. We need to determine the maximum value we
216 // can put in each ADDI. In the negative direction, we can use -2048 which is
217 // always sufficiently aligned. In the positive direction, we need to find the
218 // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
219 // created with LUI.
220 assert(Align < 2048 && "Required alignment too large");
221 int64_t MaxPosAdjStep = 2048 - Align;
222 if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
223 int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
224 Val -= FirstAdj;
225 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
226 .addReg(SrcReg, getKillRegState(KillSrcReg))
227 .addImm(FirstAdj)
228 .setMIFlag(Flag);
229 BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
230 .addReg(DestReg, RegState::Kill)
231 .addImm(Val)
232 .setMIFlag(Flag);
233 return;
234 }
235
// General case: materialize |Val| in a scratch register, then ADD or SUB.
236 unsigned Opc = RISCV::ADD;
237 if (Val < 0) {
238 Val = -Val;
239 Opc = RISCV::SUB;
240 }
241
242 Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
243 TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
244 BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
245 .addReg(SrcReg, getKillRegState(KillSrcReg))
246 .addReg(ScratchReg, RegState::Kill)
247 .setMIFlag(Flag);
248}
249
250// Split a VSPILLx_Mx pseudo into multiple whole register stores separated by
251// LMUL*VLENB bytes.
// NF whole-register stores (VS1R/VS2R/VS4R by LMUL) are emitted, with the
// base pointer advanced by LMUL*VLENB between fields via an ADD of the
// PseudoReadVLENB result (shifted left by log2(LMUL)).
253 DebugLoc DL = II->getDebugLoc();
254 MachineBasicBlock &MBB = *II->getParent();
259
260 auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
261 unsigned NF = ZvlssegInfo->first;
262 unsigned LMUL = ZvlssegInfo->second;
263 assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
264 unsigned Opcode, SubRegIdx;
265 switch (LMUL) {
266 default:
267 llvm_unreachable("LMUL must be 1, 2, or 4.");
268 case 1:
269 Opcode = RISCV::VS1R_V;
270 SubRegIdx = RISCV::sub_vrm1_0;
271 break;
272 case 2:
273 Opcode = RISCV::VS2R_V;
274 SubRegIdx = RISCV::sub_vrm2_0;
275 break;
276 case 4:
277 Opcode = RISCV::VS4R_V;
278 SubRegIdx = RISCV::sub_vrm4_0;
279 break;
280 }
// The loop below indexes subregisters as SubRegIdx + I; these asserts pin
// the consecutive numbering that makes that valid.
281 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
282 "Unexpected subreg numbering");
283 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
284 "Unexpected subreg numbering");
285 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
286 "Unexpected subreg numbering");
287
// VL here holds the byte stride between fields: VLENB << log2(LMUL).
288 Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
289 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
290 uint32_t ShiftAmount = Log2_32(LMUL);
291 if (ShiftAmount != 0)
292 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
293 .addReg(VL)
294 .addImm(ShiftAmount);
295
296 Register SrcReg = II->getOperand(0).getReg();
297 Register Base = II->getOperand(1).getReg();
298 bool IsBaseKill = II->getOperand(1).isKill();
299 Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
300 for (unsigned I = 0; I < NF; ++I) {
301 // Adding implicit-use of super register to describe we are using part of
302 // super register, that prevents machine verifier complaining when part of
303 // subreg is undef, see comment in MachineVerifier::checkLiveness for more
304 // detail.
305 BuildMI(MBB, II, DL, TII->get(Opcode))
306 .addReg(TRI->getSubReg(SrcReg, SubRegIdx + I))
307 .addReg(Base, getKillRegState(I == NF - 1))
308 .addMemOperand(*(II->memoperands_begin()))
309 .addReg(SrcReg, RegState::Implicit);
// Advance the base for the next field; the original base is only killed
// on its last use (first ADD if IsBaseKill, else final store).
310 if (I != NF - 1)
311 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
312 .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
313 .addReg(VL, getKillRegState(I == NF - 2));
314 Base = NewBase;
315 }
316 II->eraseFromParent();
317}
318
319// Split a VSPILLx_Mx pseudo into multiple whole register loads separated by
320// LMUL*VLENB bytes.
// Mirror image of lowerVSPILL: NF whole-register loads (VL1RE8/VL2RE8/VL4RE8
// by LMUL) into consecutive subregisters of the destination, with the base
// advanced by LMUL*VLENB between fields.
322 DebugLoc DL = II->getDebugLoc();
323 MachineBasicBlock &MBB = *II->getParent();
328
329 auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
330 unsigned NF = ZvlssegInfo->first;
331 unsigned LMUL = ZvlssegInfo->second;
332 assert(NF * LMUL <= 8 && "Invalid NF/LMUL combinations.");
333 unsigned Opcode, SubRegIdx;
334 switch (LMUL) {
335 default:
336 llvm_unreachable("LMUL must be 1, 2, or 4.");
337 case 1:
338 Opcode = RISCV::VL1RE8_V;
339 SubRegIdx = RISCV::sub_vrm1_0;
340 break;
341 case 2:
342 Opcode = RISCV::VL2RE8_V;
343 SubRegIdx = RISCV::sub_vrm2_0;
344 break;
345 case 4:
346 Opcode = RISCV::VL4RE8_V;
347 SubRegIdx = RISCV::sub_vrm4_0;
348 break;
349 }
// The loop below indexes subregisters as SubRegIdx + I; these asserts pin
// the consecutive numbering that makes that valid.
350 static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
351 "Unexpected subreg numbering");
352 static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
353 "Unexpected subreg numbering");
354 static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
355 "Unexpected subreg numbering");
356
// VL here holds the byte stride between fields: VLENB << log2(LMUL).
357 Register VL = MRI.createVirtualRegister(&RISCV::GPRRegClass);
358 BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VL);
359 uint32_t ShiftAmount = Log2_32(LMUL);
360 if (ShiftAmount != 0)
361 BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), VL)
362 .addReg(VL)
363 .addImm(ShiftAmount);
364
365 Register DestReg = II->getOperand(0).getReg();
366 Register Base = II->getOperand(1).getReg();
367 bool IsBaseKill = II->getOperand(1).isKill();
368 Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);
369 for (unsigned I = 0; I < NF; ++I) {
370 BuildMI(MBB, II, DL, TII->get(Opcode),
371 TRI->getSubReg(DestReg, SubRegIdx + I))
372 .addReg(Base, getKillRegState(I == NF - 1))
373 .addMemOperand(*(II->memoperands_begin()));
// Advance the base for the next field; the original base is only killed
// on its last use.
374 if (I != NF - 1)
375 BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
376 .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
377 .addReg(VL, getKillRegState(I == NF - 2));
378 Base = NewBase;
379 }
380 II->eraseFromParent();
381}
382
// Rewrites the frame-index operand of *II into a concrete base register plus
// offset, materializing offset arithmetic via adjustReg when the offset does
// not fit the instruction's immediate. Also lowers the segment-spill/reload
// pseudos listed at the bottom. Returns true if the instruction was removed
// or replaced by the pseudo lowering.
384 int SPAdj, unsigned FIOperandNum,
385 RegScavenger *RS) const {
386 assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");
387
388 MachineInstr &MI = *II;
389 MachineFunction &MF = *MI.getParent()->getParent();
392 DebugLoc DL = MI.getDebugLoc();
393
394 int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
395 Register FrameReg;
397 getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
// RVV spill/fill pseudos have no immediate operand to fold into.
398 bool IsRVVSpill = RISCV::isRVVSpill(MI);
399 if (!IsRVVSpill)
400 Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());
401
402 if (Offset.getScalable() &&
403 ST.getRealMinVLen() == ST.getRealMaxVLen()) {
404 // For an exact VLEN value, scalable offsets become constant and thus
405 // can be converted entirely into fixed offsets.
406 int64_t FixedValue = Offset.getFixed();
407 int64_t ScalableValue = Offset.getScalable();
408 assert(ScalableValue % 8 == 0 &&
409 "Scalable offset is not a multiple of a single vector size.");
410 int64_t NumOfVReg = ScalableValue / 8;
411 int64_t VLENB = ST.getRealMinVLen() / 8;
412 Offset = StackOffset::getFixed(FixedValue + NumOfVReg * VLENB);
413 }
414
// NOTE(review): the statement inside this block is elided in this extract;
// per the message text it reports a fatal error.
415 if (!isInt<32>(Offset.getFixed())) {
417 "Frame offsets outside of the signed 32-bit range not supported");
418 }
419
420 if (!IsRVVSpill) {
421 if (MI.getOpcode() == RISCV::ADDI && !isInt<12>(Offset.getFixed())) {
422 // We chose to emit the canonical immediate sequence rather than folding
423 // the offset into the using add under the theory that doing so doesn't
424 // save dynamic instruction count and some target may fuse the canonical
425 // 32 bit immediate sequence. We still need to clear the portion of the
426 // offset encoded in the immediate.
427 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
428 } else {
429 // We can encode an add with 12 bit signed immediate in the immediate
430 // operand of our user instruction. As a result, the remaining
431 // offset can by construction, at worst, a LUI and a ADD.
432 int64_t Val = Offset.getFixed();
433 int64_t Lo12 = SignExtend64<12>(Val);
434 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
// NOTE(review): the start of the Offset reassignment is elided here;
// presumably Offset = StackOffset::get(Val - Lo12, ...scalable...).
436 Offset.getScalable());
437 }
438 }
439
440 if (Offset.getScalable() || Offset.getFixed()) {
441 Register DestReg;
// An ADDI can compute the address directly into its own destination;
// other users need a scratch register holding base + offset.
442 if (MI.getOpcode() == RISCV::ADDI)
443 DestReg = MI.getOperand(0).getReg();
444 else
445 DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
446 adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
447 MachineInstr::NoFlags, std::nullopt);
448 MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/false,
449 /*IsImp*/false,
450 /*IsKill*/true);
451 } else {
452 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/false,
453 /*IsImp*/false,
454 /*IsKill*/false);
455 }
456
457 // If after materializing the adjustment, we have a pointless ADDI, remove it
458 if (MI.getOpcode() == RISCV::ADDI &&
459 MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
460 MI.getOperand(2).getImm() == 0) {
461 MI.eraseFromParent();
462 return true;
463 }
464
465 // Handle spill/fill of synthetic register classes for segment operations to
466 // ensure correctness in the edge case one gets spilled. There are many
467 // possible optimizations here, but given the extreme rarity of such spills,
468 // we prefer simplicity of implementation for now.
469 switch (MI.getOpcode()) {
470 case RISCV::PseudoVSPILL2_M1:
471 case RISCV::PseudoVSPILL2_M2:
472 case RISCV::PseudoVSPILL2_M4:
473 case RISCV::PseudoVSPILL3_M1:
474 case RISCV::PseudoVSPILL3_M2:
475 case RISCV::PseudoVSPILL4_M1:
476 case RISCV::PseudoVSPILL4_M2:
477 case RISCV::PseudoVSPILL5_M1:
478 case RISCV::PseudoVSPILL6_M1:
479 case RISCV::PseudoVSPILL7_M1:
480 case RISCV::PseudoVSPILL8_M1:
481 lowerVSPILL(II);
482 return true;
483 case RISCV::PseudoVRELOAD2_M1:
484 case RISCV::PseudoVRELOAD2_M2:
485 case RISCV::PseudoVRELOAD2_M4:
486 case RISCV::PseudoVRELOAD3_M1:
487 case RISCV::PseudoVRELOAD3_M2:
488 case RISCV::PseudoVRELOAD4_M1:
489 case RISCV::PseudoVRELOAD4_M2:
490 case RISCV::PseudoVRELOAD5_M1:
491 case RISCV::PseudoVRELOAD6_M1:
492 case RISCV::PseudoVRELOAD7_M1:
493 case RISCV::PseudoVRELOAD8_M1:
494 lowerVRELOAD(II);
495 return true;
496 }
497
498 return false;
499}
500
// RISC-V always allows the LocalStackSlotAllocation pass to create virtual
// base registers.
502 const MachineFunction &MF) const {
503 return true;
504}
505
506// Returns true if the instruction's frame index reference would be better
507// served by a base register other than FP or SP.
508// Used by LocalStackSlotAllocation pass to determine which frame index
509// references it should create new base registers for.
// Heuristic: estimate the worst-case offset from FP (or SP) and answer true
// only when that offset would not fit the 12-bit immediate checked by
// isFrameOffsetLegal.
511 int64_t Offset) const {
512 unsigned FIOperandNum = 0;
513 for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
514 assert(FIOperandNum < MI->getNumOperands() &&
515 "Instr doesn't have FrameIndex operand");
516
517 // For RISC-V, The machine instructions that include a FrameIndex operand
518 // are load/store, ADDI instructions.
519 unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
520 if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
521 return false;
522 // We only generate virtual base registers for loads and stores, so
523 // return false for everything else.
524 if (!MI->mayLoad() && !MI->mayStore())
525 return false;
526
527 const MachineFunction &MF = *MI->getMF();
528 const MachineFrameInfo &MFI = MF.getFrameInfo();
529 const RISCVFrameLowering *TFI = getFrameLowering(MF);
530 const MachineRegisterInfo &MRI = MF.getRegInfo();
531 unsigned CalleeSavedSize = 0;
532 Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
533
534 // Estimate the stack size used to store callee saved registers(
535 // excludes reserved registers).
536 BitVector ReservedRegs = getReservedRegs(MF);
537 for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R; ++R) {
538 if (!ReservedRegs.test(Reg))
539 CalleeSavedSize += getSpillSize(*getMinimalPhysRegClass(Reg));
540 }
541
// CSRs sit between FP and the local area, so FP-relative offsets are
// reduced by the CSR spill size.
542 int64_t MaxFPOffset = Offset - CalleeSavedSize;
543 if (TFI->hasFP(MF) && !shouldRealignStack(MF))
544 return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
545
546 // Assume 128 bytes spill slots size to estimate the maximum possible
547 // offset relative to the stack pointer.
548 // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
549 // real one for RISC-V.
550 int64_t MaxSPOffset = Offset + 128;
551 MaxSPOffset += MFI.getLocalFrameSize();
552 return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
553}
554
555// Determine whether a given base register plus offset immediate is
556// encodable to resolve a frame index.
// Legal iff the total offset (including the instruction's existing
// immediate) fits a 12-bit signed immediate. BaseReg itself is not
// consulted beyond the signature.
558 Register BaseReg,
559 int64_t Offset) const {
560 unsigned FIOperandNum = 0;
561 while (!MI->getOperand(FIOperandNum).isFI()) {
562 FIOperandNum++;
563 assert(FIOperandNum < MI->getNumOperands() &&
564 "Instr does not have a FrameIndex operand!");
565 }
566
567 Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
568 return isInt<12>(Offset);
569}
570
571// Insert defining instruction(s) for a pointer to FrameIdx before
572// insertion point I.
573// Return materialized frame pointer.
// Emits a single ADDI of FrameIdx + Offset into a new GPR virtual
// register; the frame index is resolved later by eliminateFrameIndex.
575 int FrameIdx,
576 int64_t Offset) const {
578 DebugLoc DL;
579 if (MBBI != MBB->end())
580 DL = MBBI->getDebugLoc();
582 MachineRegisterInfo &MFI = MF->getRegInfo();
584
585 Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
586 BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
587 .addFrameIndex(FrameIdx)
588 .addImm(Offset);
589 return BaseReg;
590}
591
592// Resolve a frame index operand of an instruction to reference the
593// indicated base register plus offset instead.
595 int64_t Offset) const {
596 unsigned FIOperandNum = 0;
597 while (!MI.getOperand(FIOperandNum).isFI()) {
598 FIOperandNum++;
599 assert(FIOperandNum < MI.getNumOperands() &&
600 "Instr does not have a FrameIndex operand!");
601 }
602
// Fold the instruction's pre-existing immediate into the new offset.
603 Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
604 // FrameIndex Operands are always represented as a
605 // register followed by an immediate.
606 MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
607 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
608}
609
610// Get the offset from the referenced frame index in the instruction,
611// if there is one.
// Only valid for I/S-format instructions, where the immediate directly
// follows the frame-index operand.
613 int Idx) const {
614 assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
615 RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
616 "The MI must be I or S format.");
617 assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
618 "FrameIndex operand");
619 return MI->getOperand(Idx + 1).getImm();
620}
621
// FP (x8) when the function keeps a frame pointer, otherwise SP (x2).
623 const TargetFrameLowering *TFI = getFrameLowering(MF);
624 return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
625}
626
// Register mask of callee-preserved registers for a call with convention CC:
// empty for GHC, otherwise selected by the target ABI. Mirrors the list
// selection in getCalleeSavedRegs.
627const uint32_t *
629 CallingConv::ID CC) const {
630 auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
631
632 if (CC == CallingConv::GHC)
633 return CSR_NoRegs_RegMask;
// NOTE(review): the ABI case labels between the returns below are elided
// in this extract; each return pairs with its soft/single/double-float
// ABI variants judging by the mask names.
634 switch (Subtarget.getTargetABI()) {
635 default:
636 llvm_unreachable("Unrecognized ABI");
639 return CSR_ILP32_LP64_RegMask;
642 return CSR_ILP32F_LP64F_RegMask;
645 return CSR_ILP32D_LP64D_RegMask;
646 }
647}
648
// Widen the constrained mask-register class VMV0 to the full vector
// register class; everything else is already as large as legal.
651 const MachineFunction &) const {
652 if (RC == &RISCV::VMV0RegClass)
653 return &RISCV::VRRegClass;
654 return RC;
655}
656
// Appends DWARF expression opcodes that apply Offset: a plain constant for
// the fixed part, and (|scalable|/8) * VLENB added or subtracted for the
// scalable part, reading VLENB through DW_OP_bregx.
658 SmallVectorImpl<uint64_t> &Ops) const {
659 // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
660 // to represent one vector register. The dwarf offset is
661 // VLENB * scalable_offset / 8.
662 assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
663
664 // Add fixed-sized offset using existing DIExpression interface.
665 DIExpression::appendOffset(Ops, Offset.getFixed());
666
667 unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
668 int64_t VLENBSized = Offset.getScalable() / 8;
// Positive and negative scalable parts differ only in the final
// plus/minus opcode; the multiplier constant is kept unsigned.
669 if (VLENBSized > 0) {
670 Ops.push_back(dwarf::DW_OP_constu);
671 Ops.push_back(VLENBSized);
672 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
673 Ops.push_back(dwarf::DW_OP_mul);
674 Ops.push_back(dwarf::DW_OP_plus);
675 } else if (VLENBSized < 0) {
676 Ops.push_back(dwarf::DW_OP_constu);
677 Ops.push_back(-VLENBSized);
678 Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
679 Ops.push_back(dwarf::DW_OP_mul);
680 Ops.push_back(dwarf::DW_OP_minus);
681 }
682}
683
// Selects cost table 1 when the compressed extension (C or Zca) is
// available, biasing allocation toward compressible registers.
684unsigned
686 return MF.getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 1 : 0;
687}
688
689// Add two address hints to improve chances of being able to use a compressed
690// instruction.
// After the generic copy hints, this scans every use/def of VirtReg in
// compressible instructions and hints the physical register of the tied
// counterpart operand (restricted to GPRC when the compressed form needs
// it), so the allocator can form C.xxx encodings.
692 Register VirtReg, ArrayRef<MCPhysReg> Order,
694 const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
695 const MachineRegisterInfo *MRI = &MF.getRegInfo();
696
697 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
698 VirtReg, Order, Hints, MF, VRM, Matrix);
699
700 if (!VRM || DisableRegAllocHints)
701 return BaseImplRetVal;
702
703 // Add any two address hints after any copy hints.
704 SmallSet<Register, 4> TwoAddrHints;
705
// Records MO's (already-assigned or physical) register as a hint,
// subject to the GPRC restriction and skipping reserved/duplicate regs.
706 auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
707 bool NeedGPRC) -> void {
708 Register Reg = MO.getReg();
709 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
710 if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg))) {
711 assert(!MO.getSubReg() && !VRRegMO.getSubReg() && "Unexpected subreg!");
712 if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
713 TwoAddrHints.insert(PhysReg);
714 }
715 };
716
717 // This is all of the compressible binary instructions. If an instruction
718 // needs GPRC register class operands \p NeedGPRC will be set to true.
719 auto isCompressible = [](const MachineInstr &MI, bool &NeedGPRC) {
720 NeedGPRC = false;
721 switch (MI.getOpcode()) {
722 default:
723 return false;
724 case RISCV::AND:
725 case RISCV::OR:
726 case RISCV::XOR:
727 case RISCV::SUB:
728 case RISCV::ADDW:
729 case RISCV::SUBW:
730 NeedGPRC = true;
731 return true;
732 case RISCV::ANDI:
733 NeedGPRC = true;
// c.andi only encodes a 6-bit signed immediate.
734 return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
735 case RISCV::SRAI:
736 case RISCV::SRLI:
737 NeedGPRC = true;
738 return true;
739 case RISCV::ADD:
740 case RISCV::SLLI:
741 return true;
742 case RISCV::ADDI:
743 case RISCV::ADDIW:
744 return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
745 }
746 };
747
748 // Returns true if this operand is compressible. For non-registers it always
749 // returns true. Immediate range was already checked in isCompressible.
750 // For registers, it checks if the register is a GPRC register. reg-reg
751 // instructions that require GPRC need all register operands to be GPRC.
752 auto isCompressibleOpnd = [&](const MachineOperand &MO) {
753 if (!MO.isReg())
754 return true;
755 Register Reg = MO.getReg();
756 Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
757 return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
758 };
759
// Walk every non-debug use/def of VirtReg: depending on which operand slot
// VirtReg occupies (def, first source, or second source of a commutable
// op), hint the operand it would need to share a register with.
760 for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
761 const MachineInstr &MI = *MO.getParent();
762 unsigned OpIdx = MO.getOperandNo();
763 bool NeedGPRC;
764 if (isCompressible(MI, NeedGPRC)) {
765 if (OpIdx == 0 && MI.getOperand(1).isReg()) {
766 if (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))
767 tryAddHint(MO, MI.getOperand(1), NeedGPRC);
768 if (MI.isCommutable() && MI.getOperand(2).isReg() &&
769 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
770 tryAddHint(MO, MI.getOperand(2), NeedGPRC);
771 } else if (OpIdx == 1 &&
772 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(2)))) {
773 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
774 } else if (MI.isCommutable() && OpIdx == 2 &&
775 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
776 tryAddHint(MO, MI.getOperand(0), NeedGPRC);
777 }
778 }
779 }
780
// Emit hints in allocation-order priority.
781 for (MCPhysReg OrderReg : Order)
782 if (TwoAddrHints.count(OrderReg))
783 Hints.push_back(OrderReg);
784
785 return BaseImplRetVal;
786}
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
Live Register Matrix
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
#define P(N)
static cl::opt< bool > DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden, cl::init(false), cl::desc("Disable two address hints for register " "allocation"))
static const std::pair< unsigned, int > FixedCSRFIMap[]
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
Definition: StackMaps.cpp:195
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:470
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
bool test(unsigned Idx) const
Definition: BitVector.h:461
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
A debug info location.
Definition: DebugLoc.h:33
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
Definition: Function.h:237
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.cpp:644
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:24
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int64_t getLocalFrameSize() const
Get the size of the local object blob.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
Definition: MachineInstr.h:68
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasBP(const MachineFunction &MF) const
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
bool isRegisterReservedByUser(Register i) const
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:687
void push_back(const T &Elt)
Definition: SmallVector.h:416
StackOffset holds a fixed and a scalable offset in bytes.
Definition: TypeSize.h:36
int64_t getFixed() const
Returns the fixed component of the stack.
Definition: TypeSize.h:52
static StackOffset get(int64_t Fixed, int64_t Scalable)
Definition: TypeSize.h:47
Information about stack frame layout on the target.
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM=nullptr, const LiveRegMatrix *Matrix=nullptr) const
Get a list of 'hint' registers that the register allocator should try first when allocating a physica...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:105
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
Definition: CallingConv.h:50
MCRegister getBPReg()
static unsigned getFormat(uint64_t TSFlags)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isRVVSpill(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:445
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:440
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition: MathExtras.h:382
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:145
unsigned getKillRegState(bool B)
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1846
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1976
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition: Alignment.h:117
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Definition: Alignment.h:141
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
const TargetRegisterClass * getLargestLegalSuperClass(const TargetRegisterClass *RC, const MachineFunction &) const override
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
void lowerVRELOAD(MachineBasicBlock::iterator II) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
RISCVRegisterInfo(unsigned HwMode)
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
void lowerVSPILL(MachineBasicBlock::iterator II) const
Register getFrameRegister(const MachineFunction &MF) const override
void adjustReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, Register SrcReg, StackOffset Offset, MachineInstr::MIFlag Flag, MaybeAlign RequiredAlign) const
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
const uint32_t * getNoPreservedMask() const override
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool getRegAllocationHints(Register VirtReg, ArrayRef< MCPhysReg > Order, SmallVectorImpl< MCPhysReg > &Hints, const MachineFunction &MF, const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const override
bool hasReservedSpillSlot(const MachineFunction &MF, Register Reg, int &FrameIdx) const override
int64_t getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const override
unsigned getRegisterCostTableIndex(const MachineFunction &MF) const override
bool eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override