//===-- RISCVRegisterInfo.cpp - RISC-V Register Information -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the RISC-V implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "RISCVRegisterInfo.h"
#include "RISCV.h"
#include "RISCVSubtarget.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/Error.h"
#define GET_REGINFO_TARGET_DESC
#include "RISCVGenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool> DisableCostPerUse("riscv-disable-cost-per-use",
                                       cl::init(false), cl::Hidden);
static cl::opt<bool>
    DisableRegAllocHints("riscv-disable-regalloc-hints", cl::Hidden,
                         cl::init(false),
                         cl::desc("Disable two address hints for register "
                                  "allocation"));

static_assert(RISCV::X1 == RISCV::X0 + 1, "Register list not consecutive");
static_assert(RISCV::X31 == RISCV::X0 + 31, "Register list not consecutive");
static_assert(RISCV::F1_H == RISCV::F0_H + 1, "Register list not consecutive");
static_assert(RISCV::F31_H == RISCV::F0_H + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_F == RISCV::F0_F + 1, "Register list not consecutive");
static_assert(RISCV::F31_F == RISCV::F0_F + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_D == RISCV::F0_D + 1, "Register list not consecutive");
static_assert(RISCV::F31_D == RISCV::F0_D + 31,
              "Register list not consecutive");
static_assert(RISCV::F1_Q == RISCV::F0_Q + 1, "Register list not consecutive");
static_assert(RISCV::F31_Q == RISCV::F0_Q + 31,
              "Register list not consecutive");
static_assert(RISCV::V1 == RISCV::V0 + 1, "Register list not consecutive");
static_assert(RISCV::V31 == RISCV::V0 + 31, "Register list not consecutive");

RISCVRegisterInfo::RISCVRegisterInfo(unsigned HwMode)
    : RISCVGenRegisterInfo(RISCV::X1, /*DwarfFlavour*/ 0, /*EHFlavor*/ 0,
                           /*PC*/ 0, HwMode) {}

const MCPhysReg *
RISCVRegisterInfo::getIPRACSRegs(const MachineFunction *MF) const {
  return CSR_IPRA_SaveList;
}

const MCPhysReg *
RISCVRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  auto &Subtarget = MF->getSubtarget<RISCVSubtarget>();
  if (MF->getFunction().getCallingConv() == CallingConv::GHC)
    return CSR_NoRegs_SaveList;
  if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
    return Subtarget.hasStdExtE() ? CSR_RT_MostRegs_RVE_SaveList
                                  : CSR_RT_MostRegs_SaveList;
  if (MF->getFunction().hasFnAttribute("interrupt")) {
    if (Subtarget.hasVInstructions()) {
      if (Subtarget.hasStdExtD())
        return Subtarget.hasStdExtE() ? CSR_XLEN_F64_V_Interrupt_RVE_SaveList
                                      : CSR_XLEN_F64_V_Interrupt_SaveList;
      if (Subtarget.hasStdExtF())
        return Subtarget.hasStdExtE() ? CSR_XLEN_F32_V_Interrupt_RVE_SaveList
                                      : CSR_XLEN_F32_V_Interrupt_SaveList;
      return Subtarget.hasStdExtE() ? CSR_XLEN_V_Interrupt_RVE_SaveList
                                    : CSR_XLEN_V_Interrupt_SaveList;
    }
    if (Subtarget.hasStdExtD())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F64_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F64_Interrupt_SaveList;
    if (Subtarget.hasStdExtF())
      return Subtarget.hasStdExtE() ? CSR_XLEN_F32_Interrupt_RVE_SaveList
                                    : CSR_XLEN_F32_Interrupt_SaveList;
    return Subtarget.hasStdExtE() ? CSR_Interrupt_RVE_SaveList
                                  : CSR_Interrupt_SaveList;
  }

  bool HasVectorCSR =
      MF->getFunction().getCallingConv() == CallingConv::RISCV_VectorCall &&
      Subtarget.hasVInstructions();

  switch (Subtarget.getTargetABI()) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_SaveList;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (HasVectorCSR)
      return CSR_ILP32_LP64_V_SaveList;
    return CSR_ILP32_LP64_SaveList;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (HasVectorCSR)
      return CSR_ILP32F_LP64F_V_SaveList;
    return CSR_ILP32F_LP64F_SaveList;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (HasVectorCSR)
      return CSR_ILP32D_LP64D_V_SaveList;
    return CSR_ILP32D_LP64D_SaveList;
  }
}

BitVector RISCVRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  BitVector Reserved(getNumRegs());
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  for (size_t Reg = 0; Reg < getNumRegs(); Reg++) {
    // Mark any GPRs requested to be reserved as such.
    if (Subtarget.isRegisterReservedByUser(Reg))
      markSuperRegs(Reserved, Reg);

    // Mark all the registers defined as constant in TableGen as reserved.
    if (isConstantPhysReg(Reg))
      markSuperRegs(Reserved, Reg);
  }

  // Use markSuperRegs to ensure any register aliases are also reserved.
  markSuperRegs(Reserved, RISCV::X2_H); // sp
  markSuperRegs(Reserved, RISCV::X3_H); // gp
  markSuperRegs(Reserved, RISCV::X4_H); // tp
  if (TFI->hasFP(MF))
    markSuperRegs(Reserved, RISCV::X8_H); // fp
  // Reserve the base register if we need to realign the stack and allocate
  // variable-sized objects at runtime.
  if (TFI->hasBP(MF))
    markSuperRegs(Reserved, RISCVABI::getBPReg()); // bp

  // Additionally reserve the dummy register used to form the register pair
  // beginning with 'x0' for instructions that take register pairs.
  markSuperRegs(Reserved, RISCV::DUMMY_REG_PAIR_WITH_X0);

  // There are only 16 GPRs for RVE.
  if (Subtarget.hasStdExtE())
    for (MCPhysReg Reg = RISCV::X16_H; Reg <= RISCV::X31_H; Reg++)
      markSuperRegs(Reserved, Reg);

  // V registers for code generation. We handle them manually.
  markSuperRegs(Reserved, RISCV::VL);
  markSuperRegs(Reserved, RISCV::VTYPE);
  markSuperRegs(Reserved, RISCV::VXSAT);
  markSuperRegs(Reserved, RISCV::VXRM);

  // Floating point environment registers.
  markSuperRegs(Reserved, RISCV::FRM);
  markSuperRegs(Reserved, RISCV::FFLAGS);

  // SiFive VCIX state registers.
  markSuperRegs(Reserved, RISCV::SF_VCIX_STATE);

  if (MF.getFunction().getCallingConv() == CallingConv::GRAAL) {
    if (Subtarget.hasStdExtE())
      reportFatalUsageError("Graal reserved registers do not exist in RVE");
    markSuperRegs(Reserved, RISCV::X23_H);
    markSuperRegs(Reserved, RISCV::X27_H);
  }

  // Shadow stack pointer.
  markSuperRegs(Reserved, RISCV::SSP);

  // XSfmmbase tile registers.
  for (MCPhysReg Reg = RISCV::T0; Reg <= RISCV::T15; Reg++)
    markSuperRegs(Reserved, Reg);

  assert(checkAllSuperRegsMarked(Reserved));
  return Reserved;
}

bool RISCVRegisterInfo::isAsmClobberable(const MachineFunction &MF,
                                         MCRegister PhysReg) const {
  return !MF.getSubtarget().isRegisterReservedByUser(PhysReg);
}

const uint32_t *RISCVRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

void RISCVRegisterInfo::adjustReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator II,
                                  const DebugLoc &DL, Register DestReg,
                                  Register SrcReg, StackOffset Offset,
                                  MachineInstr::MIFlag Flag,
                                  MaybeAlign RequiredAlign) const {

  if (DestReg == SrcReg && !Offset.getFixed() && !Offset.getScalable())
    return;

  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
  const RISCVInstrInfo *TII = ST.getInstrInfo();

  // Optimize the case where the scalable offset is known at compile time.
  if (Offset.getScalable()) {
    if (auto VLEN = ST.getRealVLen()) {
      // 1. Multiply the number of v-slots by the (constant) length of register
      const int64_t VLENB = *VLEN / 8;
      assert(Offset.getScalable() % RISCV::RVVBytesPerBlock == 0 &&
             "Reserve the stack by the multiple of one vector size.");
      const int64_t NumOfVReg = Offset.getScalable() / 8;
      const int64_t FixedOffset = NumOfVReg * VLENB;
      if (!isInt<32>(FixedOffset)) {
        reportFatalUsageError(
            "Frame size outside of the signed 32-bit range not supported");
      }
      Offset = StackOffset::getFixed(FixedOffset + Offset.getFixed());
    }
  }
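  // Worked example (illustrative, not from the source): with a known VLEN of
  // 128 bits, VLENB is 16 bytes. A scalable offset of 16 (two vector
  // registers, since RVVBytesPerBlock is 8) folds to a fixed offset of
  // 2 * 16 = 32 bytes, so no vlenb read is needed at all.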

  bool KillSrcReg = false;

  if (Offset.getScalable()) {
    unsigned ScalableAdjOpc = RISCV::ADD;
    int64_t ScalableValue = Offset.getScalable();
    if (ScalableValue < 0) {
      ScalableValue = -ScalableValue;
      ScalableAdjOpc = RISCV::SUB;
    }
    // Get vlenb and multiply vlen with the number of vector registers.
    Register ScratchReg = DestReg;
    if (DestReg == SrcReg)
      ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);

    assert(ScalableValue > 0 && "There is no need to get VLEN scaled value.");
    assert(ScalableValue % RISCV::RVVBytesPerBlock == 0 &&
           "Reserve the stack by the multiple of one vector size.");
    assert(isInt<32>(ScalableValue / RISCV::RVVBytesPerBlock) &&
           "Expect the number of vector registers within 32-bits.");
    uint32_t NumOfVReg = ScalableValue / RISCV::RVVBytesPerBlock;
    // Only use vsetvli rather than vlenb if adjusting in the prologue or
    // epilogue, otherwise it may disturb the VTYPE and VL status.
    bool IsPrologueOrEpilogue =
        Flag == MachineInstr::FrameSetup || Flag == MachineInstr::FrameDestroy;
    bool UseVsetvliRatherThanVlenb =
        IsPrologueOrEpilogue && ST.preferVsetvliOverReadVLENB();
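    // Sketch of the idea (an assumption based on the pseudo's name): the
    // PseudoReadVLENBViaVSETVLIX0 below expands to something like
    //   vsetvli rd, x0, e8, mN, ta, ma
    // which sets rd to VLMAX = N * VLENB for SEW=8, materializing a small
    // multiple of VLENB in one instruction instead of a vlenb read plus a
    // shift or multiply.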
    if (UseVsetvliRatherThanVlenb && (NumOfVReg == 1 || NumOfVReg == 2 ||
                                      NumOfVReg == 4 || NumOfVReg == 8)) {
      BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
              ScratchReg)
          .addImm(NumOfVReg)
          .setMIFlag(Flag);
      BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
          .addReg(SrcReg)
          .addReg(ScratchReg, RegState::Kill)
          .setMIFlag(Flag);
    } else {
      if (UseVsetvliRatherThanVlenb)
        BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENBViaVSETVLIX0),
                ScratchReg)
            .addImm(1)
            .setMIFlag(Flag);
      else
        BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), ScratchReg)
            .setMIFlag(Flag);

      if (ScalableAdjOpc == RISCV::ADD && ST.hasStdExtZba() &&
          (NumOfVReg == 2 || NumOfVReg == 4 || NumOfVReg == 8)) {
        unsigned Opc = NumOfVReg == 2
                           ? RISCV::SH1ADD
                           : (NumOfVReg == 4 ? RISCV::SH2ADD : RISCV::SH3ADD);
        BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
            .addReg(ScratchReg, RegState::Kill)
            .addReg(SrcReg)
            .setMIFlag(Flag);
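        // E.g. (illustrative) for NumOfVReg == 4 this emits
        //   sh2add DestReg, ScratchReg, SrcReg
        // computing SrcReg + 4 * VLENB in a single Zba instruction.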
      } else {
        TII->mulImm(MF, MBB, II, DL, ScratchReg, NumOfVReg, Flag);
        BuildMI(MBB, II, DL, TII->get(ScalableAdjOpc), DestReg)
            .addReg(SrcReg)
            .addReg(ScratchReg, RegState::Kill)
            .setMIFlag(Flag);
      }
    }
    SrcReg = DestReg;
    KillSrcReg = true;
  }

  int64_t Val = Offset.getFixed();
  if (DestReg == SrcReg && Val == 0)
    return;

  const uint64_t Align = RequiredAlign.valueOrOne().value();

  if (isInt<12>(Val)) {
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use the QC_E_ADDI instruction from the Xqcilia extension, which can take a
  // signed 26-bit immediate.
  if (ST.hasVendorXqcilia() && isInt<26>(Val)) {
    // The one case where using this instruction is sub-optimal is if Val can be
    // materialized with a single compressible LUI and the following add/sub is
    // also compressible. Avoid doing this if that is the case.
    int Hi20 = (Val & 0xFFFFF000) >> 12;
    bool IsCompressLUI =
        ((Val & 0xFFF) == 0) && (Hi20 != 0) &&
        (isUInt<5>(Hi20) || (Hi20 >= 0xfffe0 && Hi20 <= 0xfffff));
    bool IsCompressAddSub =
        (SrcReg == DestReg) &&
        ((Val > 0 && RISCV::GPRNoX0RegClass.contains(SrcReg)) ||
         (Val < 0 && RISCV::GPRCRegClass.contains(SrcReg)));

    if (!(IsCompressLUI && IsCompressAddSub)) {
      BuildMI(MBB, II, DL, TII->get(RISCV::QC_E_ADDI), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .addImm(Val)
          .setMIFlag(Flag);
      return;
    }
  }

  // Try to split the offset across two ADDIs. We need to keep the intermediate
  // result aligned after each ADDI. We need to determine the maximum value we
  // can put in each ADDI. In the negative direction, we can use -2048 which is
  // always sufficiently aligned. In the positive direction, we need to find the
  // largest 12-bit immediate that is aligned. Exclude -4096 since it can be
  // created with LUI.
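  // Worked example (illustrative): with RequiredAlign = 16 and Val = 4064,
  // MaxPosAdjStep is 2048 - 16 = 2032, so the split emits
  //   addi DestReg, SrcReg, 2032
  //   addi DestReg, DestReg, 2032
  // and the intermediate value stays 16-byte aligned.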
  assert(Align < 2048 && "Required alignment too large");
  int64_t MaxPosAdjStep = 2048 - Align;
  if (Val > -4096 && Val <= (2 * MaxPosAdjStep)) {
    int64_t FirstAdj = Val < 0 ? -2048 : MaxPosAdjStep;
    Val -= FirstAdj;
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrcReg))
        .addImm(FirstAdj)
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, TII->get(RISCV::ADDI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(Val)
        .setMIFlag(Flag);
    return;
  }

  // Use shNadd if doing so lets us materialize a 12 bit immediate with a single
  // instruction. This saves 1 instruction over the full lui/addi+add fallback
  // path. We avoid anything which can be done with a single lui as it might
  // be compressible. Note that the sh1add case is fully covered by the 2x addi
  // case just above and is thus omitted.
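  // E.g. (illustrative) Val = 8200 = 1025 << 3 is a shifted 12-bit immediate,
  // so instead of lui+addi+add we can emit
  //   <movImm ScratchReg, 1025>   ; a single addi here
  //   sh3add DestReg, ScratchReg, SrcReg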
  if (ST.hasStdExtZba() && (Val & 0xFFF) != 0) {
    unsigned Opc = 0;
    if (isShiftedInt<12, 3>(Val)) {
      Opc = RISCV::SH3ADD;
      Val = Val >> 3;
    } else if (isShiftedInt<12, 2>(Val)) {
      Opc = RISCV::SH2ADD;
      Val = Val >> 2;
    }
    if (Opc) {
      Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
      BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
          .addReg(ScratchReg, RegState::Kill)
          .addReg(SrcReg, getKillRegState(KillSrcReg))
          .setMIFlag(Flag);
      return;
    }
  }

  unsigned Opc = RISCV::ADD;
  if (Val < 0) {
    Val = -Val;
    Opc = RISCV::SUB;
  }

  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  TII->movImm(MBB, II, DL, ScratchReg, Val, Flag);
  BuildMI(MBB, II, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrcReg))
      .addReg(ScratchReg, RegState::Kill)
      .setMIFlag(Flag);
}

static std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned>
getSpillReloadInfo(unsigned NumRemaining, uint16_t RegEncoding, bool IsSpill) {
  if (NumRemaining >= 8 && RegEncoding % 8 == 0)
    return {RISCVVType::LMUL_8, RISCV::VRM8RegClass,
            IsSpill ? RISCV::VS8R_V : RISCV::VL8RE8_V};
  if (NumRemaining >= 4 && RegEncoding % 4 == 0)
    return {RISCVVType::LMUL_4, RISCV::VRM4RegClass,
            IsSpill ? RISCV::VS4R_V : RISCV::VL4RE8_V};
  if (NumRemaining >= 2 && RegEncoding % 2 == 0)
    return {RISCVVType::LMUL_2, RISCV::VRM2RegClass,
            IsSpill ? RISCV::VS2R_V : RISCV::VL2RE8_V};
  return {RISCVVType::LMUL_1, RISCV::VRRegClass,
          IsSpill ? RISCV::VS1R_V : RISCV::VL1RE8_V};
}
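// Worked example (illustrative): spilling a 7-register group starting at v1
// decomposes greedily under the constraints above into
//   v1      -> LMUL_1 (vs1r.v)
//   v2-v3   -> LMUL_2 (vs2r.v)
//   v4-v7   -> LMUL_4 (vs4r.v)
// since each store's register number must be a multiple of its LMUL.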

// Split a VSPILLx_Mx/VRELOADx_Mx pseudo into multiple whole register stores or
// loads separated by LMUL*VLENB bytes.
void RISCVRegisterInfo::lowerSegmentSpillReload(MachineBasicBlock::iterator II,
                                                bool IsSpill) const {
  DebugLoc DL = II->getDebugLoc();
  MachineBasicBlock &MBB = *II->getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  auto ZvlssegInfo = RISCV::isRVVSpillForZvlsseg(II->getOpcode());
  unsigned NF = ZvlssegInfo->first;
  unsigned LMUL = ZvlssegInfo->second;
  unsigned NumRegs = NF * LMUL;
  assert(NumRegs <= 8 && "Invalid NF/LMUL combinations.");

  Register Reg = II->getOperand(0).getReg();
  uint16_t RegEncoding = TRI->getEncodingValue(Reg);
  Register Base = II->getOperand(1).getReg();
  bool IsBaseKill = II->getOperand(1).isKill();
  Register NewBase = MRI.createVirtualRegister(&RISCV::GPRRegClass);

  auto *OldMMO = *(II->memoperands_begin());
  LocationSize OldLoc = OldMMO->getSize();
  assert(OldLoc.isPrecise() && OldLoc.getValue().isKnownMultipleOf(NF));
  TypeSize VRegSize = OldLoc.getValue().divideCoefficientBy(NumRegs);

  Register VLENB = 0;
  unsigned PreHandledNum = 0;
  unsigned I = 0;
  while (I != NumRegs) {
    auto [LMulHandled, RegClass, Opcode] =
        getSpillReloadInfo(NumRegs - I, RegEncoding, IsSpill);
    auto [RegNumHandled, _] = RISCVVType::decodeVLMUL(LMulHandled);
    bool IsLast = I + RegNumHandled == NumRegs;
    if (PreHandledNum) {
      Register Step;
      // Optimize for constant VLEN.
      if (auto VLEN = STI.getRealVLen()) {
        int64_t Offset = *VLEN / 8 * PreHandledNum;
        Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        STI.getInstrInfo()->movImm(MBB, II, DL, Step, Offset);
      } else {
        if (!VLENB) {
          VLENB = MRI.createVirtualRegister(&RISCV::GPRRegClass);
          BuildMI(MBB, II, DL, TII->get(RISCV::PseudoReadVLENB), VLENB);
        }
        uint32_t ShiftAmount = Log2_32(PreHandledNum);
        if (ShiftAmount == 0)
          Step = VLENB;
        else {
          Step = MRI.createVirtualRegister(&RISCV::GPRRegClass);
          BuildMI(MBB, II, DL, TII->get(RISCV::SLLI), Step)
              .addReg(VLENB, getKillRegState(IsLast))
              .addImm(ShiftAmount);
        }
      }

      BuildMI(MBB, II, DL, TII->get(RISCV::ADD), NewBase)
          .addReg(Base, getKillRegState(I != 0 || IsBaseKill))
          .addReg(Step, getKillRegState(Step != VLENB || IsLast));
      Base = NewBase;
    }

    MCRegister ActualReg = findVRegWithEncoding(RegClass, RegEncoding);
    MachineInstrBuilder MIB =
        BuildMI(MBB, II, DL, TII->get(Opcode))
            .addReg(ActualReg, getDefRegState(!IsSpill))
            .addReg(Base, getKillRegState(IsLast))
            .addMemOperand(MF.getMachineMemOperand(OldMMO, OldMMO->getOffset(),
                                                   VRegSize * RegNumHandled));

    // Add an implicit use of the super register to describe that we are using
    // part of it; this prevents the machine verifier from complaining when
    // part of the subreg is undef. See the comment in
    // MachineVerifier::checkLiveness for more detail.
    if (IsSpill)
      MIB.addReg(Reg, RegState::Implicit);

    PreHandledNum = RegNumHandled;
    RegEncoding += RegNumHandled;
    I += RegNumHandled;
  }
  II->eraseFromParent();
}

bool RISCVRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected non-zero SPAdj value");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  Register FrameReg;
  StackOffset Offset =
      getFrameLowering(MF)->getFrameIndexReference(MF, FrameIndex, FrameReg);
  bool IsRVVSpill = RISCV::isRVVSpill(MI);
  if (!IsRVVSpill)
    Offset += StackOffset::getFixed(MI.getOperand(FIOperandNum + 1).getImm());

  if (!isInt<32>(Offset.getFixed())) {
    reportFatalUsageError(
        "Frame offsets outside of the signed 32-bit range not supported");
  }

  if (!IsRVVSpill) {
    int64_t Val = Offset.getFixed();
    int64_t Lo12 = SignExtend64<12>(Val);
    unsigned Opc = MI.getOpcode();

    if (Opc == RISCV::ADDI && !isInt<12>(Val)) {
      // We chose to emit the canonical immediate sequence rather than folding
      // the offset into the using add under the theory that doing so doesn't
      // save dynamic instruction count and some target may fuse the canonical
      // 32 bit immediate sequence. We still need to clear the portion of the
      // offset encoded in the immediate.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PREFETCH_I || Opc == RISCV::PREFETCH_R ||
                Opc == RISCV::PREFETCH_W) &&
               (Lo12 & 0b11111) != 0) {
      // Prefetch instructions require the offset to be 32-byte aligned.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if (Opc == RISCV::MIPS_PREF && !isUInt<9>(Val)) {
      // MIPS prefetch instructions require the offset to fit in 9 bits.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else if ((Opc == RISCV::PseudoRV32ZdinxLD ||
                Opc == RISCV::PseudoRV32ZdinxSD) &&
               Lo12 >= 2044) {
      // This instruction will be split into 2 instructions. The second
      // instruction will add 4 to the immediate. If that would overflow 12
      // bits, we can't fold the offset.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
    } else {
      // We can encode an add with a 12-bit signed immediate in the immediate
      // operand of our user instruction. As a result, the remaining offset is,
      // by construction, at worst a LUI and an ADD.
      MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Lo12);
      Offset = StackOffset::get((uint64_t)Val - (uint64_t)Lo12,
                                Offset.getScalable());
    }
  }
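  // Worked example (illustrative): for a load with a fixed offset of 74565
  // (0x12345), Lo12 = SignExtend64<12>(0x345) = 837 is folded into the
  // instruction's immediate, leaving 73728 (0x12000) to be materialized by
  // adjustReg below as lui+add.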

  if (Offset.getScalable() || Offset.getFixed()) {
    Register DestReg;
    if (MI.getOpcode() == RISCV::ADDI)
      DestReg = MI.getOperand(0).getReg();
    else
      DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    adjustReg(*II->getParent(), II, DL, DestReg, FrameReg, Offset,
              MachineInstr::NoFlags, std::nullopt);
    MI.getOperand(FIOperandNum).ChangeToRegister(DestReg, /*IsDef*/ false,
                                                 /*IsImp*/ false,
                                                 /*IsKill*/ true);
  } else {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, /*IsDef*/ false,
                                                 /*IsImp*/ false,
                                                 /*IsKill*/ false);
  }

  // If after materializing the adjustment, we have a pointless ADDI, remove it.
  if (MI.getOpcode() == RISCV::ADDI &&
      MI.getOperand(0).getReg() == MI.getOperand(1).getReg() &&
      MI.getOperand(2).getImm() == 0) {
    MI.eraseFromParent();
    return true;
  }

  // Handle spill/fill of synthetic register classes for segment operations to
  // ensure correctness in the edge case one gets spilled.
  switch (MI.getOpcode()) {
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVSPILL8_M1:
    lowerSegmentSpillReload(II, /*IsSpill=*/true);
    return true;
  case RISCV::PseudoVRELOAD2_M1:
  case RISCV::PseudoVRELOAD2_M2:
  case RISCV::PseudoVRELOAD2_M4:
  case RISCV::PseudoVRELOAD3_M1:
  case RISCV::PseudoVRELOAD3_M2:
  case RISCV::PseudoVRELOAD4_M1:
  case RISCV::PseudoVRELOAD4_M2:
  case RISCV::PseudoVRELOAD5_M1:
  case RISCV::PseudoVRELOAD6_M1:
  case RISCV::PseudoVRELOAD7_M1:
  case RISCV::PseudoVRELOAD8_M1:
    lowerSegmentSpillReload(II, /*IsSpill=*/false);
    return true;
  }

  return false;
}

bool RISCVRegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}

// Returns true if the instruction's frame index reference would be better
// served by a base register other than FP or SP.
// Used by the LocalStackSlotAllocation pass to determine which frame index
// references it should create new base registers for.
bool RISCVRegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  for (; !MI->getOperand(FIOperandNum).isFI(); FIOperandNum++)
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand");

  // For RISC-V, the machine instructions that include a FrameIndex operand
  // are load/store and ADDI instructions.
  unsigned MIFrm = RISCVII::getFormat(MI->getDesc().TSFlags);
  if (MIFrm != RISCVII::InstFormatI && MIFrm != RISCVII::InstFormatS)
    return false;
  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  const MachineFunction &MF = *MI->getMF();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const RISCVFrameLowering *TFI = getFrameLowering(MF);
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  if (TFI->hasFP(MF) && !shouldRealignStack(MF)) {
    auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();
    // Estimate the stack size used to store callee saved registers
    // (excluding reserved registers).
    unsigned CalleeSavedSize = 0;
    for (const MCPhysReg *R = MRI.getCalleeSavedRegs(); MCPhysReg Reg = *R;
         ++R) {
      if (Subtarget.isRegisterReservedByUser(Reg))
        continue;

      if (RISCV::GPRRegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::GPRRegClass);
      else if (RISCV::FPR64RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR64RegClass);
      else if (RISCV::FPR32RegClass.contains(Reg))
        CalleeSavedSize += getSpillSize(RISCV::FPR32RegClass);
      // Ignore vector registers.
    }

    int64_t MaxFPOffset = Offset - CalleeSavedSize;
    return !isFrameOffsetLegal(MI, RISCV::X8, MaxFPOffset);
  }

  // Assume 128 bytes of spill slots to estimate the maximum possible
  // offset relative to the stack pointer.
  // FIXME: The 128 is copied from ARM. We should run some statistics and pick a
  // real one for RISC-V.
  int64_t MaxSPOffset = Offset + 128;
  MaxSPOffset += MFI.getLocalFrameSize();
  return !isFrameOffsetLegal(MI, RISCV::X2, MaxSPOffset);
}

// Determine whether a given base register plus offset immediate is
// encodable to resolve a frame index.
bool RISCVRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                           Register BaseReg,
                                           int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(MI, FIOperandNum);
  return isInt<12>(Offset);
}

// Insert defining instruction(s) for a pointer to FrameIdx before
// insertion point I.
// Returns the materialized frame base register.
Register RISCVRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                         int FrameIdx,
                                                         int64_t Offset) const {
  MachineBasicBlock::iterator MBBI = MBB->begin();
  DebugLoc DL;
  if (MBBI != MBB->end())
    DL = MBBI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MFI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  Register BaseReg = MFI.createVirtualRegister(&RISCV::GPRRegClass);
  BuildMI(*MBB, MBBI, DL, TII->get(RISCV::ADDI), BaseReg)
      .addFrameIndex(FrameIdx)
      .addImm(Offset);
  return BaseReg;
}

// Resolve a frame index operand of an instruction to reference the
// indicated base register plus offset instead.
void RISCVRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                          int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    FIOperandNum++;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr does not have a FrameIndex operand!");
  }

  Offset += getFrameIndexInstrOffset(&MI, FIOperandNum);
  // FrameIndex operands are always represented as a
  // register followed by an immediate.
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}

// Get the offset from the referenced frame index in the instruction,
// if there is one.
int64_t RISCVRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                    int Idx) const {
  assert((RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatI ||
          RISCVII::getFormat(MI->getDesc().TSFlags) == RISCVII::InstFormatS) &&
         "The MI must be I or S format.");
  assert(MI->getOperand(Idx).isFI() && "The Idx'th operand of MI is not a "
                                       "FrameIndex operand");
  return MI->getOperand(Idx + 1).getImm();
}

Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
}

StringRef RISCVRegisterInfo::getRegAsmName(MCRegister Reg) const {
  if (Reg == RISCV::SF_VCIX_STATE)
    return "sf.vcix_state";
  return TargetRegisterInfo::getRegAsmName(Reg);
}

const uint32_t *
RISCVRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const {
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (CC == CallingConv::PreserveMost) {
    if (ABI == RISCVABI::ABI_ILP32E || ABI == RISCVABI::ABI_LP64E)
      return CSR_RT_MostRegs_RVE_RegMask;
    return CSR_RT_MostRegs_RegMask;
  }
  switch (ABI) {
  default:
    llvm_unreachable("Unrecognized ABI");
  case RISCVABI::ABI_ILP32E:
  case RISCVABI::ABI_LP64E:
    return CSR_ILP32E_LP64E_RegMask;
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    if (CC == CallingConv::RISCV_VectorCall && Subtarget.hasVInstructions())
      return CSR_ILP32_LP64_V_RegMask;
    return CSR_ILP32_LP64_RegMask;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    if (CC == CallingConv::RISCV_VectorCall && Subtarget.hasVInstructions())
      return CSR_ILP32F_LP64F_V_RegMask;
    return CSR_ILP32F_LP64F_RegMask;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    if (CC == CallingConv::RISCV_VectorCall && Subtarget.hasVInstructions())
      return CSR_ILP32D_LP64D_V_RegMask;
    return CSR_ILP32D_LP64D_RegMask;
  }
}

const TargetRegisterClass *
RISCVRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &) const {
  if (RC == &RISCV::VMV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRNoV0RegClass)
    return &RISCV::VRRegClass;
  if (RC == &RISCV::VRM2NoV0RegClass)
    return &RISCV::VRM2RegClass;
  if (RC == &RISCV::VRM4NoV0RegClass)
    return &RISCV::VRM4RegClass;
  if (RC == &RISCV::VRM8NoV0RegClass)
    return &RISCV::VRM8RegClass;
  return RC;
}

void RISCVRegisterInfo::getOffsetOpcodes(const StackOffset &Offset,
                                         SmallVectorImpl<uint64_t> &Ops) const {
  // VLENB is the length of a vector register in bytes. We use <vscale x 8 x i8>
  // to represent one vector register. The dwarf offset is
  // VLENB * scalable_offset / 8.
  assert(Offset.getScalable() % 8 == 0 && "Invalid frame offset");
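  // E.g. (illustrative) a scalable offset of +16 becomes the DWARF expression
  //   DW_OP_constu 2; DW_OP_bregx vlenb 0; DW_OP_mul; DW_OP_plus
  // i.e. base + 2 * VLENB.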

  // Add the fixed-sized offset using the existing DIExpression interface.
  DIExpression::appendOffset(Ops, Offset.getFixed());

  unsigned VLENB = getDwarfRegNum(RISCV::VLENB, true);
  int64_t VLENBSized = Offset.getScalable() / 8;
  if (VLENBSized > 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_plus);
  } else if (VLENBSized < 0) {
    Ops.push_back(dwarf::DW_OP_constu);
    Ops.push_back(-VLENBSized);
    Ops.append({dwarf::DW_OP_bregx, VLENB, 0ULL});
    Ops.push_back(dwarf::DW_OP_mul);
    Ops.push_back(dwarf::DW_OP_minus);
  }
}

unsigned
RISCVRegisterInfo::getRegisterCostTableIndex(const MachineFunction &MF) const {
  return MF.getSubtarget<RISCVSubtarget>().hasStdExtZca() && !DisableCostPerUse
             ? 1
             : 0;
}

float RISCVRegisterInfo::getSpillWeightScaleFactor(
    const TargetRegisterClass *RC) const {
  return getRegClassWeight(RC).RegWeight;
}

// Add two address hints to improve chances of being able to use a compressed
// instruction.
bool RISCVRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  auto &Subtarget = MF.getSubtarget<RISCVSubtarget>();

  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (!VRM || DisableRegAllocHints)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<Register, 4> TwoAddrHints;

  auto tryAddHint = [&](const MachineOperand &VRRegMO, const MachineOperand &MO,
                        bool NeedGPRC) -> void {
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    // TODO: Support GPRPair subregisters? Need to be careful with even/odd
    // registers. If the virtual register is an odd register of a pair and the
    // physical register is even (or vice versa), we should not add the hint.
    if (PhysReg && (!NeedGPRC || RISCV::GPRCRegClass.contains(PhysReg)) &&
        !MO.getSubReg() && !VRRegMO.getSubReg()) {
      if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
        TwoAddrHints.insert(PhysReg);
    }
  };

  // This is all of the compressible binary instructions. If an instruction
  // needs GPRC register class operands, \p NeedGPRC will be set to true.
  auto isCompressible = [&Subtarget](const MachineInstr &MI, bool &NeedGPRC) {
    NeedGPRC = false;
    switch (MI.getOpcode()) {
    default:
      return false;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    case RISCV::SUB:
    case RISCV::ADDW:
    case RISCV::SUBW:
      NeedGPRC = true;
      return true;
    case RISCV::ANDI: {
      NeedGPRC = true;
      if (!MI.getOperand(2).isImm())
        return false;
      int64_t Imm = MI.getOperand(2).getImm();
      if (isInt<6>(Imm))
        return true;
      // c.zext.b
      return Subtarget.hasStdExtZcb() && Imm == 255;
    }
    case RISCV::SRAI:
    case RISCV::SRLI:
      NeedGPRC = true;
      return true;
    case RISCV::ADD:
    case RISCV::SLLI:
      return true;
    case RISCV::ADDI:
    case RISCV::ADDIW:
      return MI.getOperand(2).isImm() && isInt<6>(MI.getOperand(2).getImm());
    case RISCV::MUL:
    case RISCV::SEXT_B:
    case RISCV::SEXT_H:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      // c.mul, c.sext.b, c.sext.h, c.zext.h
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb();
    case RISCV::ADD_UW:
      // c.zext.w
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isReg() &&
             MI.getOperand(2).getReg() == RISCV::X0;
    case RISCV::XORI:
      // c.not
      NeedGPRC = true;
      return Subtarget.hasStdExtZcb() && MI.getOperand(2).isImm() &&
             MI.getOperand(2).getImm() == -1;
    }
  };
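  // For example, SUB needs GPRC because c.sub encodes both operands in 3-bit
  // register fields that can only name x8-x15, whereas ADD has a compressed
  // form (c.add) that accepts any GPR, so NeedGPRC stays false for it.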

  // Returns true if this operand is compressible. For non-registers it always
  // returns true. Immediate range was already checked in isCompressible.
  // For registers, it checks if the register is a GPRC register. reg-reg
  // instructions that require GPRC need all register operands to be GPRC.
  auto isCompressibleOpnd = [&](const MachineOperand &MO) {
    if (!MO.isReg())
      return true;
    Register Reg = MO.getReg();
    Register PhysReg = Reg.isPhysical() ? Reg : Register(VRM->getPhys(Reg));
    return PhysReg && RISCV::GPRCRegClass.contains(PhysReg);
  };

  for (auto &MO : MRI->reg_nodbg_operands(VirtReg)) {
    const MachineInstr &MI = *MO.getParent();
    unsigned OpIdx = MO.getOperandNo();
    bool NeedGPRC;
    if (isCompressible(MI, NeedGPRC)) {
      if (OpIdx == 0 && MI.getOperand(1).isReg()) {
        if (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
            MI.getOpcode() == RISCV::ADD_UW ||
            isCompressibleOpnd(MI.getOperand(2)))
          tryAddHint(MO, MI.getOperand(1), NeedGPRC);
        if (MI.isCommutable() && MI.getOperand(2).isReg() &&
            (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1))))
          tryAddHint(MO, MI.getOperand(2), NeedGPRC);
      } else if (OpIdx == 1 && (!NeedGPRC || MI.getNumExplicitOperands() < 3 ||
                                isCompressibleOpnd(MI.getOperand(2)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      } else if (MI.isCommutable() && OpIdx == 2 &&
                 (!NeedGPRC || isCompressibleOpnd(MI.getOperand(1)))) {
        tryAddHint(MO, MI.getOperand(0), NeedGPRC);
      }
    }

    // Add a hint if it would allow auipc/lui+addi(w) fusion. We do this even
    // without the fusions explicitly enabled as the impact is rarely negative
    // and some cores do implement this fusion.
    if ((MI.getOpcode() == RISCV::ADDIW || MI.getOpcode() == RISCV::ADDI) &&
        MI.getOperand(1).isReg()) {
      const MachineBasicBlock &MBB = *MI.getParent();
      MachineBasicBlock::const_iterator I = MI.getIterator();
      // Is the previous instruction a LUI or AUIPC that can be fused?
      if (I != MBB.begin()) {
        I = skipDebugInstructionsBackward(std::prev(I), MBB.begin());
        if ((I->getOpcode() == RISCV::LUI || I->getOpcode() == RISCV::AUIPC) &&
            I->getOperand(0).getReg() == MI.getOperand(1).getReg()) {
          if (OpIdx == 0)
            tryAddHint(MO, MI.getOperand(1), /*NeedGPRC=*/false);
          else
            tryAddHint(MO, MI.getOperand(0), /*NeedGPRC=*/false);
        }
      }
    }
  }

  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}

Register
RISCVRegisterInfo::findVRegWithEncoding(const TargetRegisterClass &RegClass,
                                        uint16_t Encoding) const {
  MCRegister Reg = RISCV::V0 + Encoding;
  if (&RegClass == &RISCV::VRRegClass)
    return Reg;
  return getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
}
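// E.g. (illustrative) findVRegWithEncoding(VRM2RegClass, 4) starts from V4 and
// returns the VRM2 super-register whose first M1 subregister is V4, i.e. V4M2.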