LLVM 23.0.0git
RISCVInstructionSelector.cpp
Go to the documentation of this file.
1//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// RISC-V.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
16#include "RISCVSubtarget.h"
17#include "RISCVTargetMachine.h"
25#include "llvm/IR/IntrinsicsRISCV.h"
26#include "llvm/Support/Debug.h"
27
28#define DEBUG_TYPE "riscv-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
36
37namespace {
38
39class RISCVInstructionSelector : public InstructionSelector {
40public:
41 RISCVInstructionSelector(const RISCVTargetMachine &TM,
42 const RISCVSubtarget &STI,
43 const RISCVRegisterBankInfo &RBI);
44
45 bool select(MachineInstr &MI) override;
46
47 void setupMF(MachineFunction &MF, GISelValueTracking *VT,
48 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49 BlockFrequencyInfo *BFI) override {
50 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
51 MRI = &MF.getRegInfo();
52 }
53
54 static const char *getName() { return DEBUG_TYPE; }
55
56private:
58 getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;
59
60 static constexpr unsigned MaxRecursionDepth = 6;
61
62 bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
63 const unsigned Depth = 0) const;
64 bool hasAllHUsers(const MachineInstr &MI) const {
65 return hasAllNBitUsers(MI, 16);
66 }
67 bool hasAllWUsers(const MachineInstr &MI) const {
68 return hasAllNBitUsers(MI, 32);
69 }
70
71 bool isRegInGprb(Register Reg) const;
72 bool isRegInFprb(Register Reg) const;
73
74 // tblgen-erated 'select' implementation, used as the initial selector for
75 // the patterns that don't require complex C++.
76 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
77
78 // A lowering phase that runs before any selection attempts.
79 // Returns true if the instruction was modified.
80 void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);
81
82 bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);
83
84 // Custom selection methods
85 bool selectCopy(MachineInstr &MI) const;
86 bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
87 bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
88 bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
89 bool IsExternWeak = false) const;
90 bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
91 bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
92 void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
93 MachineIRBuilder &MIB) const;
95 void addVectorLoadStoreOperands(MachineInstr &I,
97 unsigned &CurOp, bool IsMasked,
98 bool IsStridedOrIndexed,
99 LLT *IndexVT = nullptr) const;
100 bool selectIntrinsicWithSideEffects(MachineInstr &I,
101 MachineIRBuilder &MIB) const;
102 bool selectIntrinsic(MachineInstr &I, MachineIRBuilder &MIB) const;
103 bool selectExtractSubvector(MachineInstr &MI, MachineIRBuilder &MIB) const;
104
105 ComplexRendererFns selectShiftMask(MachineOperand &Root,
106 unsigned ShiftWidth) const;
107 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
108 return selectShiftMask(Root, STI.getXLen());
109 }
110 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
111 return selectShiftMask(Root, 32);
112 }
113 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
114
115 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
116 template <unsigned Bits>
117 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
118 return selectSExtBits(Root, Bits);
119 }
120
121 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
122 template <unsigned Bits>
123 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
124 return selectZExtBits(Root, Bits);
125 }
126
127 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
128 template <unsigned ShAmt>
129 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
130 return selectSHXADDOp(Root, ShAmt);
131 }
132
133 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
134 unsigned ShAmt) const;
135 template <unsigned ShAmt>
136 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
137 return selectSHXADD_UWOp(Root, ShAmt);
138 }
139
140 ComplexRendererFns renderVLOp(MachineOperand &Root) const;
141
142 // Custom renderers for tablegen
143 void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
144 int OpIdx) const;
145 void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
146 int OpIdx) const;
147 void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
148 int OpIdx) const;
149 void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
150 int OpIdx) const;
151 void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
152 int OpIdx) const;
153
154 void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
155 int OpIdx) const;
156 void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
157 const MachineInstr &MI, int OpIdx) const;
158
159 void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
160 int OpIdx) const;
161 void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
162 int OpIdx) const;
163
164 const RISCVSubtarget &STI;
165 const RISCVInstrInfo &TII;
166 const RISCVRegisterInfo &TRI;
167 const RISCVRegisterBankInfo &RBI;
168 const RISCVTargetMachine &TM;
169
170 MachineRegisterInfo *MRI = nullptr;
171
172 // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
173 // uses "STI." in the code generated by TableGen. We need to unify the name of
174 // Subtarget variable.
175 const RISCVSubtarget *Subtarget = &STI;
176
177#define GET_GLOBALISEL_PREDICATES_DECL
178#include "RISCVGenGlobalISel.inc"
179#undef GET_GLOBALISEL_PREDICATES_DECL
180
181#define GET_GLOBALISEL_TEMPORARIES_DECL
182#include "RISCVGenGlobalISel.inc"
183#undef GET_GLOBALISEL_TEMPORARIES_DECL
184};
185
186} // end anonymous namespace
187
188#define GET_GLOBALISEL_IMPL
189#include "RISCVGenGlobalISel.inc"
190#undef GET_GLOBALISEL_IMPL
191
192RISCVInstructionSelector::RISCVInstructionSelector(
193 const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
194 const RISCVRegisterBankInfo &RBI)
195 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
196 TM(TM),
197
199#include "RISCVGenGlobalISel.inc"
202#include "RISCVGenGlobalISel.inc"
204{
205}
206
207// Mimics optimizations in ISel and RISCVOptWInst Pass
208bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
209 unsigned Bits,
210 const unsigned Depth) const {
211
212 assert((MI.getOpcode() == TargetOpcode::G_ADD ||
213 MI.getOpcode() == TargetOpcode::G_SUB ||
214 MI.getOpcode() == TargetOpcode::G_MUL ||
215 MI.getOpcode() == TargetOpcode::G_SHL ||
216 MI.getOpcode() == TargetOpcode::G_LSHR ||
217 MI.getOpcode() == TargetOpcode::G_AND ||
218 MI.getOpcode() == TargetOpcode::G_OR ||
219 MI.getOpcode() == TargetOpcode::G_XOR ||
220 MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
221 "Unexpected opcode");
222
223 if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
224 return false;
225
226 auto DestReg = MI.getOperand(0).getReg();
227 for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
228 assert(UserOp.getParent() && "UserOp must have a parent");
229 const MachineInstr &UserMI = *UserOp.getParent();
230 unsigned OpIdx = UserOp.getOperandNo();
231
232 switch (UserMI.getOpcode()) {
233 default:
234 return false;
235 case RISCV::ADDW:
236 case RISCV::ADDIW:
237 case RISCV::SUBW:
238 case RISCV::FCVT_D_W:
239 case RISCV::FCVT_S_W:
240 if (Bits >= 32)
241 break;
242 return false;
243 case RISCV::SLL:
244 case RISCV::SRA:
245 case RISCV::SRL:
246 // Shift amount operands only use log2(Xlen) bits.
247 if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
248 break;
249 return false;
250 case RISCV::SLLI:
251 // SLLI only uses the lower (XLen - ShAmt) bits.
252 if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
253 break;
254 return false;
255 case RISCV::ANDI:
256 if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
257 (uint64_t)UserMI.getOperand(2).getImm()))
258 break;
259 goto RecCheck;
260 case RISCV::AND:
261 case RISCV::OR:
262 case RISCV::XOR:
263 RecCheck:
264 if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
265 break;
266 return false;
267 case RISCV::SRLI: {
268 unsigned ShAmt = UserMI.getOperand(2).getImm();
269 // If we are shifting right by less than Bits, and users don't demand any
270 // bits that were shifted into [Bits-1:0], then we can consider this as an
271 // N-Bit user.
272 if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
273 break;
274 return false;
275 }
276 }
277 }
278
279 return true;
280}
281
282InstructionSelector::ComplexRendererFns
283RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
284 unsigned ShiftWidth) const {
285 if (!Root.isReg())
286 return std::nullopt;
287
288 using namespace llvm::MIPatternMatch;
289
290 Register ShAmtReg = Root.getReg();
291 // Peek through zext.
292 Register ZExtSrcReg;
293 if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
294 ShAmtReg = ZExtSrcReg;
295
296 APInt AndMask;
297 Register AndSrcReg;
298 // Try to combine the following pattern (applicable to other shift
299 // instructions as well as 32-bit ones):
300 //
301 // %4:gprb(s64) = G_AND %3, %2
302 // %5:gprb(s64) = G_LSHR %1, %4(s64)
303 //
304 // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
305 // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
306 // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
307 // then it can be eliminated. Given register rs1 or rs2 holding a constant
308 // (the and mask), there are two cases G_AND can be erased:
309 //
310 // 1. the lowest log2(XLEN) bits of the and mask are all set
311 // 2. the bits of the register being masked are already unset (zero set)
312 if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
313 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
314 if (ShMask.isSubsetOf(AndMask)) {
315 ShAmtReg = AndSrcReg;
316 } else {
317 // SimplifyDemandedBits may have optimized the mask so try restoring any
318 // bits that are known zero.
319 KnownBits Known = VT->getKnownBits(AndSrcReg);
320 if (ShMask.isSubsetOf(AndMask | Known.Zero))
321 ShAmtReg = AndSrcReg;
322 }
323 }
324
325 APInt Imm;
327 if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
328 if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
329 // If we are shifting by X+N where N == 0 mod Size, then just shift by X
330 // to avoid the ADD.
331 ShAmtReg = Reg;
332 } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
333 if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
334 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
335 // to generate a NEG instead of a SUB of a constant.
336 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
337 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
338 return {{[=](MachineInstrBuilder &MIB) {
339 MachineIRBuilder(*MIB.getInstr())
340 .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
341 MIB.addReg(ShAmtReg);
342 }}};
343 }
344 if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
345 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
346 // to generate a NOT instead of a SUB of a constant.
347 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
348 return {{[=](MachineInstrBuilder &MIB) {
349 MachineIRBuilder(*MIB.getInstr())
350 .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
351 .addImm(-1);
352 MIB.addReg(ShAmtReg);
353 }}};
354 }
355 }
356
357 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
358}
359
360InstructionSelector::ComplexRendererFns
361RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
362 unsigned Bits) const {
363 if (!Root.isReg())
364 return std::nullopt;
365 Register RootReg = Root.getReg();
366 MachineInstr *RootDef = MRI->getVRegDef(RootReg);
367
368 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
369 RootDef->getOperand(2).getImm() == Bits) {
370 return {
371 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
372 }
373
374 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
375 if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
376 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
377
378 return std::nullopt;
379}
380
381InstructionSelector::ComplexRendererFns
382RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
383 unsigned Bits) const {
384 if (!Root.isReg())
385 return std::nullopt;
386 Register RootReg = Root.getReg();
387
388 Register RegX;
389 uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
390 if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
391 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
392 }
393
394 if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
395 MRI->getType(RegX).getScalarSizeInBits() == Bits)
396 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
397
398 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
399 if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
400 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
401
402 return std::nullopt;
403}
404
405InstructionSelector::ComplexRendererFns
406RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
407 unsigned ShAmt) const {
408 using namespace llvm::MIPatternMatch;
409
410 if (!Root.isReg())
411 return std::nullopt;
412 Register RootReg = Root.getReg();
413
414 const unsigned XLen = STI.getXLen();
415 APInt Mask, C2;
416 Register RegY;
417 std::optional<bool> LeftShift;
418 // (and (shl y, c2), mask)
419 if (mi_match(RootReg, *MRI,
420 m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
421 LeftShift = true;
422 // (and (lshr y, c2), mask)
423 else if (mi_match(RootReg, *MRI,
424 m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
425 LeftShift = false;
426
427 if (LeftShift.has_value()) {
428 if (*LeftShift)
430 else
432
433 if (Mask.isShiftedMask()) {
434 unsigned Leading = XLen - Mask.getActiveBits();
435 unsigned Trailing = Mask.countr_zero();
436 // Given (and (shl y, c2), mask) in which mask has no leading zeros and
437 // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
438 if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
439 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
440 return {{[=](MachineInstrBuilder &MIB) {
441 MachineIRBuilder(*MIB.getInstr())
442 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
443 .addImm(Trailing - C2.getLimitedValue());
444 MIB.addReg(DstReg);
445 }}};
446 }
447
448 // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
449 // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
450 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
451 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
452 return {{[=](MachineInstrBuilder &MIB) {
453 MachineIRBuilder(*MIB.getInstr())
454 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
455 .addImm(Leading + Trailing);
456 MIB.addReg(DstReg);
457 }}};
458 }
459 }
460 }
461
462 LeftShift.reset();
463
464 // (shl (and y, mask), c2)
465 if (mi_match(RootReg, *MRI,
466 m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
467 m_ICst(C2))))
468 LeftShift = true;
469 // (lshr (and y, mask), c2)
470 else if (mi_match(RootReg, *MRI,
472 m_ICst(C2))))
473 LeftShift = false;
474
475 if (LeftShift.has_value() && Mask.isShiftedMask()) {
476 unsigned Leading = XLen - Mask.getActiveBits();
477 unsigned Trailing = Mask.countr_zero();
478
479 // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
480 // c3 trailing zeros. If c1 + c3 == ShAmt, we can emit SRLIW + SHXADD.
481 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
482 (Trailing + C2.getLimitedValue()) == ShAmt;
483 if (!Cond)
484 // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
485 // c3 trailing zeros. If c3 - c1 == ShAmt, we can emit SRLIW + SHXADD.
486 Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
487 (Trailing - C2.getLimitedValue()) == ShAmt;
488
489 if (Cond) {
490 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
491 return {{[=](MachineInstrBuilder &MIB) {
492 MachineIRBuilder(*MIB.getInstr())
493 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
494 .addImm(Trailing);
495 MIB.addReg(DstReg);
496 }}};
497 }
498 }
499
500 return std::nullopt;
501}
502
503InstructionSelector::ComplexRendererFns
504RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
505 unsigned ShAmt) const {
506 using namespace llvm::MIPatternMatch;
507
508 if (!Root.isReg())
509 return std::nullopt;
510 Register RootReg = Root.getReg();
511
512 // Given (and (shl x, c2), mask) in which mask is a shifted mask with
513 // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
514 // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
515 APInt Mask, C2;
516 Register RegX;
517 if (mi_match(
518 RootReg, *MRI,
520 m_ICst(Mask))))) {
522
523 if (Mask.isShiftedMask()) {
524 unsigned Leading = Mask.countl_zero();
525 unsigned Trailing = Mask.countr_zero();
526 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
527 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
528 return {{[=](MachineInstrBuilder &MIB) {
529 MachineIRBuilder(*MIB.getInstr())
530 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
531 .addImm(C2.getLimitedValue() - ShAmt);
532 MIB.addReg(DstReg);
533 }}};
534 }
535 }
536 }
537
538 return std::nullopt;
539}
540
541InstructionSelector::ComplexRendererFns
542RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
543 assert(Root.isReg() && "Expected operand to be a Register");
544 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
545
546 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
547 auto C = RootDef->getOperand(1).getCImm();
548 if (C->getValue().isAllOnes())
549 // If the operand is a G_CONSTANT with value of all ones it is larger than
550 // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
551 // recognized specially by the vsetvli insertion pass.
552 return {{[=](MachineInstrBuilder &MIB) {
553 MIB.addImm(RISCV::VLMaxSentinel);
554 }}};
555
556 if (isUInt<5>(C->getZExtValue())) {
557 uint64_t ZExtC = C->getZExtValue();
558 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
559 }
560 }
561 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
562}
563
564InstructionSelector::ComplexRendererFns
565RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
566 if (!Root.isReg())
567 return std::nullopt;
568
569 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
570 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
571 return {{
572 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
573 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
574 }};
575 }
576
577 if (isBaseWithConstantOffset(Root, *MRI)) {
578 MachineOperand &LHS = RootDef->getOperand(1);
579 MachineOperand &RHS = RootDef->getOperand(2);
580 MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
581 MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
582
583 int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
584 if (isInt<12>(RHSC)) {
585 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
586 return {{
587 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
588 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
589 }};
590
591 return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
592 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
593 }
594 }
595
596 // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
597 // the combiner?
598 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
599 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
600}
601
602/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
603/// CC Must be an ICMP Predicate.
604static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
605 switch (CC) {
606 default:
607 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
608 case CmpInst::Predicate::ICMP_EQ:
609 return RISCVCC::COND_EQ;
610 case CmpInst::Predicate::ICMP_NE:
611 return RISCVCC::COND_NE;
612 case CmpInst::Predicate::ICMP_ULT:
613 return RISCVCC::COND_LTU;
614 case CmpInst::Predicate::ICMP_SLT:
615 return RISCVCC::COND_LT;
616 case CmpInst::Predicate::ICMP_UGE:
617 return RISCVCC::COND_GEU;
618 case CmpInst::Predicate::ICMP_SGE:
619 return RISCVCC::COND_GE;
620 }
621}
622
626 // Try to fold an ICmp. If that fails, use a NE compare with X0.
628 if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
629 LHS = CondReg;
630 RHS = RISCV::X0;
631 CC = RISCVCC::COND_NE;
632 return;
633 }
634
635 // We found an ICmp, do some canonicalization.
636
637 // Adjust comparisons to use comparison with 0 if possible.
639 switch (Pred) {
641 // Convert X > -1 to X >= 0
642 if (*Constant == -1) {
643 CC = RISCVCC::COND_GE;
644 RHS = RISCV::X0;
645 return;
646 }
647 break;
649 // Convert X < 1 to 0 >= X
650 if (*Constant == 1) {
651 CC = RISCVCC::COND_GE;
652 RHS = LHS;
653 LHS = RISCV::X0;
654 return;
655 }
656 break;
657 default:
658 break;
659 }
660 }
661
662 switch (Pred) {
663 default:
664 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
671 // These CCs are supported directly by RISC-V branches.
672 break;
677 // These CCs are not supported directly by RISC-V branches, but changing the
678 // direction of the CC and swapping LHS and RHS are.
679 Pred = CmpInst::getSwappedPredicate(Pred);
680 std::swap(LHS, RHS);
681 break;
682 }
683
684 CC = getRISCVCCFromICmp(Pred);
685}
686
687/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
688/// \p GenericOpc, appropriate for the GPR register bank and of memory access
689/// size \p OpSize.
690static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
691 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
692 switch (OpSize) {
693 default:
694 llvm_unreachable("Unexpected memory size");
695 case 8:
696 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
697 case 16:
698 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
699 case 32:
700 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
701 case 64:
702 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
703 }
704}
705
706/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
707/// \p GenericOpc, appropriate for the GPR register bank and of memory access
708/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
709static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
710 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
711 switch (OpSize) {
712 case 8:
713 // Prefer unsigned due to no c.lb in Zcb.
714 return IsStore ? RISCV::SB : RISCV::LBU;
715 case 16:
716 return IsStore ? RISCV::SH : RISCV::LH;
717 case 32:
718 return IsStore ? RISCV::SW : RISCV::LW;
719 case 64:
720 return IsStore ? RISCV::SD : RISCV::LD;
721 }
722
723 return GenericOpc;
724}
725
726void RISCVInstructionSelector::addVectorLoadStoreOperands(
727 MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
728 bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
729 // Base Pointer
730 auto PtrReg = I.getOperand(CurOp++).getReg();
731 SrcOps.push_back(PtrReg);
732
733 // Stride or Index
734 if (IsStridedOrIndexed) {
735 auto StrideReg = I.getOperand(CurOp++).getReg();
736 SrcOps.push_back(StrideReg);
737 if (IndexVT)
738 *IndexVT = MRI->getType(StrideReg);
739 }
740
741 // Mask
742 if (IsMasked) {
743 auto MaskReg = I.getOperand(CurOp++).getReg();
744 SrcOps.push_back(MaskReg);
745 }
746}
747
748bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
749 MachineInstr &I, MachineIRBuilder &MIB) const {
750 // Find the intrinsic ID.
751 unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
752 // Select the instruction.
753 switch (IntrinID) {
754 default:
755 return false;
756 case Intrinsic::riscv_vlm:
757 case Intrinsic::riscv_vle:
758 case Intrinsic::riscv_vle_mask:
759 case Intrinsic::riscv_vlse:
760 case Intrinsic::riscv_vlse_mask: {
761 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
762 IntrinID == Intrinsic::riscv_vlse_mask;
763 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
764 IntrinID == Intrinsic::riscv_vlse_mask;
765 LLT VT = MRI->getType(I.getOperand(0).getReg());
766 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
767
768 // Result vector
769 const Register DstReg = I.getOperand(0).getReg();
770
771 // Sources
772 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
773 unsigned CurOp = 2;
774 SmallVector<SrcOp, 4> SrcOps; // Source registers.
775
776 // Passthru
777 if (HasPassthruOperand) {
778 auto PassthruReg = I.getOperand(CurOp++).getReg();
779 SrcOps.push_back(PassthruReg);
780 } else {
781 SrcOps.push_back(Register(RISCV::NoRegister));
782 }
783
784 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
785
787 const RISCV::VLEPseudo *P =
788 RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
789 static_cast<unsigned>(LMUL));
790
791 auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
792
793 // Select VL
794 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
795 for (auto &RenderFn : *VLOpFn)
796 RenderFn(PseudoMI);
797
798 // SEW
799 PseudoMI.addImm(Log2SEW);
800
801 // Policy
802 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
803 if (IsMasked)
804 Policy = I.getOperand(CurOp++).getImm();
805 PseudoMI.addImm(Policy);
806
807 // Memref
808 PseudoMI.cloneMemRefs(I);
809
810 I.eraseFromParent();
811 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
812 return true;
813 }
814 case Intrinsic::riscv_vloxei:
815 case Intrinsic::riscv_vloxei_mask:
816 case Intrinsic::riscv_vluxei:
817 case Intrinsic::riscv_vluxei_mask: {
818 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
819 IntrinID == Intrinsic::riscv_vluxei_mask;
820 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
821 IntrinID == Intrinsic::riscv_vloxei_mask;
822 LLT VT = MRI->getType(I.getOperand(0).getReg());
823 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
824
825 // Result vector
826 const Register DstReg = I.getOperand(0).getReg();
827
828 // Sources
829 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
830 unsigned CurOp = 2;
831 SmallVector<SrcOp, 4> SrcOps; // Source registers.
832
833 // Passthru
834 if (HasPassthruOperand) {
835 auto PassthruReg = I.getOperand(CurOp++).getReg();
836 SrcOps.push_back(PassthruReg);
837 } else {
838 // Use NoRegister if there is no specified passthru.
839 SrcOps.push_back(Register());
840 }
841 LLT IndexVT;
842 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
843
845 RISCVVType::VLMUL IndexLMUL =
847 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
848 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
849 reportFatalUsageError("The V extension does not support EEW=64 for index "
850 "values when XLEN=32");
851 }
852 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
853 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
854 static_cast<unsigned>(IndexLMUL));
855
856 auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);
857
858 // Select VL
859 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
860 for (auto &RenderFn : *VLOpFn)
861 RenderFn(PseudoMI);
862
863 // SEW
864 PseudoMI.addImm(Log2SEW);
865
866 // Policy
867 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
868 if (IsMasked)
869 Policy = I.getOperand(CurOp++).getImm();
870 PseudoMI.addImm(Policy);
871
872 // Memref
873 PseudoMI.cloneMemRefs(I);
874
875 I.eraseFromParent();
876 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
877 return true;
878 }
879 case Intrinsic::riscv_vsm:
880 case Intrinsic::riscv_vse:
881 case Intrinsic::riscv_vse_mask:
882 case Intrinsic::riscv_vsse:
883 case Intrinsic::riscv_vsse_mask: {
884 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
885 IntrinID == Intrinsic::riscv_vsse_mask;
886 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
887 IntrinID == Intrinsic::riscv_vsse_mask;
888 LLT VT = MRI->getType(I.getOperand(1).getReg());
889 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
890
891 // Sources
892 unsigned CurOp = 1;
893 SmallVector<SrcOp, 4> SrcOps; // Source registers.
894
895 // Store value
896 auto PassthruReg = I.getOperand(CurOp++).getReg();
897 SrcOps.push_back(PassthruReg);
898
899 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
900
902 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
903 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
904
905 auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);
906
907 // Select VL
908 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
909 for (auto &RenderFn : *VLOpFn)
910 RenderFn(PseudoMI);
911
912 // SEW
913 PseudoMI.addImm(Log2SEW);
914
915 // Memref
916 PseudoMI.cloneMemRefs(I);
917
918 I.eraseFromParent();
919 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
920 return true;
921 }
922 case Intrinsic::riscv_vsoxei:
923 case Intrinsic::riscv_vsoxei_mask:
924 case Intrinsic::riscv_vsuxei:
925 case Intrinsic::riscv_vsuxei_mask: {
926 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
927 IntrinID == Intrinsic::riscv_vsuxei_mask;
928 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
929 IntrinID == Intrinsic::riscv_vsoxei_mask;
930 LLT VT = MRI->getType(I.getOperand(1).getReg());
931 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
932
933 // Sources
934 unsigned CurOp = 1;
935 SmallVector<SrcOp, 4> SrcOps; // Source registers.
936
937 // Store value
938 auto PassthruReg = I.getOperand(CurOp++).getReg();
939 SrcOps.push_back(PassthruReg);
940
941 LLT IndexVT;
942 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
943
945 RISCVVType::VLMUL IndexLMUL =
947 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
948 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
949 reportFatalUsageError("The V extension does not support EEW=64 for index "
950 "values when XLEN=32");
951 }
952 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
953 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
954 static_cast<unsigned>(IndexLMUL));
955
956 auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);
957
958 // Select VL
959 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
960 for (auto &RenderFn : *VLOpFn)
961 RenderFn(PseudoMI);
962
963 // SEW
964 PseudoMI.addImm(Log2SEW);
965
966 // Memref
967 PseudoMI.cloneMemRefs(I);
968
969 I.eraseFromParent();
970 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
971 return true;
972 }
973 }
974}
975
// Select a side-effect-free RISC-V intrinsic. Currently handles only
// vsetvli/vsetvlimax, lowering them to the PseudoVSETVLI family.
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
                                               MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {

    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;

    // vsetvlimax has no AVL operand, so SEW/LMUL sit one operand earlier.
    unsigned Offset = VLMax ? 2 : 3;
    unsigned SEW = RISCVVType::decodeVSEW(I.getOperand(Offset).getImm() & 0x7);
    RISCVVType::VLMUL VLMul =
        static_cast<RISCVVType::VLMUL>(I.getOperand(Offset + 1).getImm() & 0x7);

    // Encode vtype with tail-agnostic/mask-agnostic policy bits set.
    unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                              /*MaskAgnostic*/ true);

    Register DstReg = I.getOperand(0).getReg();

    Register VLOperand;
    unsigned Opcode = RISCV::PseudoVSETVLI;

    // Check if AVL is a constant that equals VLMAX.
    if (!VLMax) {
      Register AVLReg = I.getOperand(2).getReg();
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // With a known VLEN, an AVL equal to VLEN/(SEW/LMUL ratio) is VLMAX.
        if (auto VLEN = Subtarget->getRealVLen()) {
          if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
            VLMax = true;
        }
      }

      // An all-ones AVL constant is the canonical "request VLMAX" encoding.
      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        const auto *C = AVLDef->getOperand(1).getCImm();
        if (C->getValue().isAllOnes())
          VLMax = true;
      }
    }

    if (VLMax) {
      // VLMAX request: use x0 as the AVL and the X0 pseudo.
      VLOperand = Register(RISCV::X0);
      Opcode = RISCV::PseudoVSETVLIX0;
    } else {
      Register AVLReg = I.getOperand(2).getReg();
      VLOperand = AVLReg;

      // Check if AVL is a small constant that can use PseudoVSETIVLI.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // vsetivli takes a 5-bit unsigned immediate AVL.
        if (isUInt<5>(AVL)) {
          auto PseudoMI = MIB.buildInstr(RISCV::PseudoVSETIVLI, {DstReg}, {})
                              .addImm(AVL)
                              .addImm(VTypeI);
          I.eraseFromParent();
          constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
          return true;
        }
      }
    }

    auto PseudoMI =
        MIB.buildInstr(Opcode, {DstReg}, {VLOperand}).addImm(VTypeI);
    I.eraseFromParent();
    constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
    return true;
  }
  }
}
1050
// Select G_EXTRACT_SUBVECTOR as a subregister copy when the extraction
// decomposes to a subregister index with a zero remaining element offset.
bool RISCVInstructionSelector::selectExtractSubvector(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());

  MVT DstMVT = getMVTForLLT(DstTy);
  MVT SrcMVT = getMVTForLLT(SrcTy);

  unsigned SubRegIdx;
  // NOTE(review): the callee name of this call was lost in this rendering
  // (presumably RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs)
  // — restore from upstream before building.
  std::tie(SubRegIdx, Idx) =
      SrcMVT, DstMVT, Idx, &TRI);

  // Only a pure subregister extraction (no residual element offset) is
  // handled here.
  if (Idx != 0)
    return false;

  unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Copy from the computed subregister of the source.
  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
      .addReg(SrcReg, {}, SubRegIdx);

  MI.eraseFromParent();
  return true;
}
1090
// Main entry point: select one generic MachineInstr into target code.
// Tries imported TableGen patterns first, then falls back to the manual
// per-opcode handling below.
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      // NOTE(review): the initializer of DefRC was lost in this rendering
      // (presumably a dyn_cast<const TargetRegisterClass *> of
      // RegClassOrBank) — restore from upstream before building.
      const TargetRegisterClass *DefRC =
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        // Only a bank was assigned; derive a register class from type+bank.
        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  // TableGen-imported patterns get first shot at the instruction.
  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    // These are all representation-only ops; a constrained COPY suffices.
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();

    if (SrcTy.isVector())
      return false; // Should be handled by imported patterns.

    assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // Use addiw SrcReg, 0 (sext.w) for i32.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      // NOTE(review): a line was lost in this rendering here (presumably the
      // constrainSelectedInstRegOperands call) — restore from upstream.
      return true;
    }

    // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      // NOTE(review): missing line here in this rendering (constrain call).
      return true;
    }

    // Use sext.h/zext.h for i16 with Zbb.
    if (SrcSize == 16 &&
        (STI.hasStdExtZbb() || (!IsSigned && STI.hasStdExtZbkb()))) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      // NOTE(review): missing line here in this rendering (constrain call).
      return true;
    }

    // Fall back to shift pair.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      // Constant fits in a GPR: materialize the bit pattern and fmv it over.
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
          return false;
      }

      unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      FMV.constrainAllUses(TII, TRI, RBI);
    } else {
      // s64 on rv32
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");

      if (FPimm.isPosZero()) {
        // Optimize +0.0 to use fcvt.d.w
        MachineInstrBuilder FCVT =
            MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
                .addImm(RISCVFPRndMode::RNE);
        FCVT.constrainAllUses(TII, TRI, RBI);

        MI.eraseFromParent();
        return true;
      }

      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      PairF64.constrainAllUses(TII, TRI, RBI);
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    // NOTE(review): MRI is a MachineRegisterInfo*, so it converts to the
    // bool IsLocal parameter here (always true). Looks suspicious — confirm
    // against upstream whether this should be selectAddr(MI, MIB).
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    // NOTE(review): the declaration of CC (presumably RISCVCC::CondCode CC;)
    // was lost in this rendering — restore from upstream.
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    // NOTE(review): missing line here in this rendering (constrain of Bcc).
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    // NOTE(review): missing line here in this rendering (constrain call).
    return true;
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    // Only scalar loads/stores on the GPR bank are handled manually here.
    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
    // Check that the pointer register is valid.
    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
#endif

    // Can only handle AddressSpace 0.
    if (PtrTy.getAddressSpace() != 0)
      return false;

    unsigned MemSize = LdSt.getMemSizeInBits().getValue();
    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();

    // Atomic accesses stronger than monotonic use the Zalasr forms.
    if (isStrongerThanMonotonic(Order)) {
      MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
      // NOTE(review): missing line here in this rendering (constrain call).
      return true;
    }

    const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
    if (NewOpc == MI.getOpcode())
      return false;

    // Check if we can fold anything into the addressing mode.
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Folded something. Create a new instruction and return it.
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    if (isa<GStore>(MI))
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI, MIB);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI, MIB);
  default:
    return false;
  }
}
1369
1370bool RISCVInstructionSelector::selectUnmergeValues(
1371 MachineInstr &MI, MachineIRBuilder &MIB) const {
1372 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1373
1374 if (!Subtarget->hasStdExtZfa())
1375 return false;
1376
1377 // Split F64 Src into two s32 parts
1378 if (MI.getNumOperands() != 3)
1379 return false;
1380 Register Src = MI.getOperand(2).getReg();
1381 Register Lo = MI.getOperand(0).getReg();
1382 Register Hi = MI.getOperand(1).getReg();
1383 if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
1384 return false;
1385
1386 MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
1387 constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI);
1388
1389 MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
1390 constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI);
1391
1392 MI.eraseFromParent();
1393 return true;
1394}
1395
1396bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
1397 MachineIRBuilder &MIB) {
1398 Register PtrReg = Op.getReg();
1399 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
1400
1401 const LLT sXLen = LLT::scalar(STI.getXLen());
1402 auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
1403 MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
1404 Op.setReg(PtrToInt.getReg(0));
1405 return select(*PtrToInt);
1406}
1407
1408void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
1409 MachineIRBuilder &MIB) {
1410 switch (MI.getOpcode()) {
1411 case TargetOpcode::G_PTR_ADD: {
1412 Register DstReg = MI.getOperand(0).getReg();
1413 const LLT sXLen = LLT::scalar(STI.getXLen());
1414
1415 replacePtrWithInt(MI.getOperand(1), MIB);
1416 MI.setDesc(TII.get(TargetOpcode::G_ADD));
1417 MRI->setType(DstReg, sXLen);
1418 break;
1419 }
1420 case TargetOpcode::G_PTRMASK: {
1421 Register DstReg = MI.getOperand(0).getReg();
1422 const LLT sXLen = LLT::scalar(STI.getXLen());
1423 replacePtrWithInt(MI.getOperand(1), MIB);
1424 MI.setDesc(TII.get(TargetOpcode::G_AND));
1425 MRI->setType(DstReg, sXLen);
1426 break;
1427 }
1428 }
1429}
1430
1431void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1432 const MachineInstr &MI,
1433 int OpIdx) const {
1434 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1435 "Expected G_CONSTANT");
1436 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1437 MIB.addImm(-CstVal);
1438}
1439
1440void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1441 const MachineInstr &MI,
1442 int OpIdx) const {
1443 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1444 "Expected G_CONSTANT");
1445 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1446 MIB.addImm(STI.getXLen() - CstVal);
1447}
1448
1449void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1450 const MachineInstr &MI,
1451 int OpIdx) const {
1452 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1453 "Expected G_CONSTANT");
1454 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1455 MIB.addImm(32 - CstVal);
1456}
1457
1458void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1459 const MachineInstr &MI,
1460 int OpIdx) const {
1461 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1462 "Expected G_CONSTANT");
1463 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1464 MIB.addImm(CstVal + 1);
1465}
1466
1467void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1468 const MachineInstr &MI,
1469 int OpIdx) const {
1470 assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
1471 "Expected G_FRAME_INDEX");
1472 MIB.add(MI.getOperand(1));
1473}
1474
// Render the trailing-zero count of a G_CONSTANT's value as an immediate.
void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  // NOTE(review): the line emitting the immediate was lost in this rendering
  // (presumably MIB.addImm(llvm::countr_zero(C));) — restore from upstream.
}
1483
1484void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1485 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
1486 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1487 "Expected G_CONSTANT");
1488 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
1489 MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
1490}
1491
1492void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1493 const MachineInstr &MI,
1494 int OpIdx) const {
1495 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1496 "Expected G_CONSTANT");
1497 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
1498 int64_t Adj = Imm < 0 ? -2048 : 2047;
1499 MIB.addImm(Imm - Adj);
1500}
1501
1502void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1503 const MachineInstr &MI,
1504 int OpIdx) const {
1505 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1506 "Expected G_CONSTANT");
1507 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1508 MIB.addImm(Imm);
1509}
1510
1511const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
1512 LLT Ty, const RegisterBank &RB) const {
1513 if (RB.getID() == RISCV::GPRBRegBankID) {
1514 if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
1515 return &RISCV::GPRRegClass;
1516 }
1517
1518 if (RB.getID() == RISCV::FPRBRegBankID) {
1519 if (Ty.getSizeInBits() == 16)
1520 return &RISCV::FPR16RegClass;
1521 if (Ty.getSizeInBits() == 32)
1522 return &RISCV::FPR32RegClass;
1523 if (Ty.getSizeInBits() == 64)
1524 return &RISCV::FPR64RegClass;
1525 }
1526
1527 if (RB.getID() == RISCV::VRBRegBankID) {
1528 if (Ty.getSizeInBits().getKnownMinValue() <= 64)
1529 return &RISCV::VRRegClass;
1530
1531 if (Ty.getSizeInBits().getKnownMinValue() == 128)
1532 return &RISCV::VRM2RegClass;
1533
1534 if (Ty.getSizeInBits().getKnownMinValue() == 256)
1535 return &RISCV::VRM4RegClass;
1536
1537 if (Ty.getSizeInBits().getKnownMinValue() == 512)
1538 return &RISCV::VRM8RegClass;
1539 }
1540
1541 return nullptr;
1542}
1543
1544bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
1545 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
1546}
1547
1548bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
1549 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
1550}
1551
1552bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1553 Register DstReg = MI.getOperand(0).getReg();
1554
1555 if (DstReg.isPhysical())
1556 return true;
1557
1558 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1559 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1560 assert(DstRC &&
1561 "Register class not available for LLT, register bank combination");
1562
1563 // No need to constrain SrcReg. It will get constrained when
1564 // we hit another of its uses or its defs.
1565 // Copies do not have constraints.
1566 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1567 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1568 << " operand\n");
1569 return false;
1570 }
1571
1572 MI.setDesc(TII.get(RISCV::COPY));
1573 return true;
1574}
1575
1576bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
1577 MachineIRBuilder &MIB) const {
1578 assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1579
1580 const Register DstReg = MI.getOperand(0).getReg();
1581 const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
1582 MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
1583
1584 assert(DstRC &&
1585 "Register class not available for LLT, register bank combination");
1586
1587 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1588 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1589 << " operand\n");
1590 }
1591 MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
1592 return true;
1593}
1594
// Materialize an arbitrary XLEN-bit immediate into DstReg, using the
// RISCVMatInt instruction sequence, chaining temporaries between steps.
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  // Zero is just a copy from x0.
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  // NOTE(review): the definition of Seq was lost in this rendering
  // (presumably RISCVMatInt::InstSeq Seq =
  //  RISCVMatInt::generateInstSeq(Imm, ...);) — restore from upstream.
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    // Intermediate steps write fresh temporaries; the last writes DstReg.
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
      // NOTE(review): a case label was lost in this rendering here
      // (presumably case RISCVMatInt::RegReg:) — restore from upstream.
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
      // NOTE(review): a case label was lost in this rendering here
      // (presumably case RISCVMatInt::RegImm:) — restore from upstream.
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    // NOTE(review): a line was lost in this rendering here (presumably a
    // constrainSelectedInstRegOperands(*Result, ...) call) — restore it.

    // The result of this step feeds the next one.
    SrcReg = TmpReg;
  }

  return true;
}
1641
// Select the address-forming opcodes (G_GLOBAL_VALUE, G_JUMP_TABLE,
// G_CONSTANT_POOL) according to PIC-ness, code model, and linkage.
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      // NOTE(review): a line was lost in this rendering here (presumably the
      // constrainSelectedInstRegOperands call) — restore from upstream.
      return true;
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    // NOTE(review): the first argument lines of this call were lost in this
    // rendering (presumably the PseudoSourceValue GOT pointer-info and
    // MachineMemOperand flags) — restore from upstream.
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        DefTy, Align(DefTy.getSizeInBits() / 8));

    auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                      .addDisp(DispMO, 0)
                      .addMemOperand(MemOp);

    // NOTE(review): missing line here (constrain of Result) in this rendering.

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    // NOTE(review): the start of this diagnostic call was lost in this
    // rendering (presumably a reportGISelFailure/getSubtarget call) —
    // restore from upstream.
        "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    // NOTE(review): missing line here (constrain of AddrHi) in this rendering.

    auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
                      .addDisp(DispMO, 0, RISCVII::MO_LO);

    // NOTE(review): missing line here (constrain of Result) in this rendering.

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      // NOTE(review): the first argument lines of this call were lost in this
      // rendering (GOT pointer-info and memory-operand flags) — restore them.
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          DefTy, Align(DefTy.getSizeInBits() / 8));

      auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                        .addDisp(DispMO, 0)
                        .addMemOperand(MemOp);

      // NOTE(review): missing line here (constrain of Result).

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    // NOTE(review): missing line here (constrain call) in this rendering.
    return true;
  }

  return false;
}
1751
// Lower G_SELECT to the Select_*_Using_CC_GPR pseudos, deriving the branch
// condition and its operands from the condition register.
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  // NOTE(review): the declaration of CC (presumably RISCVCC::CondCode CC;)
  // was lost in this rendering — restore from upstream before building.
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  // Default to the GPR form; switch to the FPR32/FPR64 form when the result
  // lives on the FP bank.
  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  // NOTE(review): a line was lost in this rendering here (presumably the
  // constrainSelectedInstRegOperands(*Result, ...) call) — restore it.
  return true;
}
1780
1781// Convert an FCMP predicate to one of the supported F or D instructions.
1782static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1783 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1784 switch (Pred) {
1785 default:
1786 llvm_unreachable("Unsupported predicate");
1787 case CmpInst::FCMP_OLT:
1788 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1789 case CmpInst::FCMP_OLE:
1790 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1791 case CmpInst::FCMP_OEQ:
1792 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1793 }
1794}
1795
// Try legalizing an FCMP by swapping or inverting the predicate to one that
// is supported.
// NOTE(review): the first line of this function's signature was lost in this
// rendering (presumably
// static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,) —
// restore from upstream before building.
    CmpInst::Predicate &Pred, bool &NeedInvert) {
  // The comparisons the hardware implements directly.
  auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
    return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
           Pred == CmpInst::FCMP_OEQ;
  };

  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  // NOTE(review): the declaration/initialization of InvPred was lost in this
  // rendering (presumably CmpInst::Predicate InvPred =
  // CmpInst::getSwappedPredicate(Pred);) — restore from upstream.
  // First attempt: swap the operands.
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  // Second attempt: invert the predicate (the caller must XOR the result).
  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  // Third attempt: invert and swap.
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}
1829
1830// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1831// the result in DstReg.
1832// FIXME: Maybe we should expand this earlier.
1833bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1834 MachineIRBuilder &MIB) const {
1835 auto &CmpMI = cast<GFCmp>(MI);
1836 CmpInst::Predicate Pred = CmpMI.getCond();
1837
1838 Register DstReg = CmpMI.getReg(0);
1839 Register LHS = CmpMI.getLHSReg();
1840 Register RHS = CmpMI.getRHSReg();
1841
1842 unsigned Size = MRI->getType(LHS).getSizeInBits();
1843 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1844
1845 Register TmpReg = DstReg;
1846
1847 bool NeedInvert = false;
1848 // First try swapping operands or inverting.
1849 if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1850 if (NeedInvert)
1851 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1852 auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
1853 Cmp.constrainAllUses(TII, TRI, RBI);
1854 } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1855 // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
1856 NeedInvert = Pred == CmpInst::FCMP_UEQ;
1858 {&RISCV::GPRRegClass}, {LHS, RHS});
1859 Cmp1.constrainAllUses(TII, TRI, RBI);
1861 {&RISCV::GPRRegClass}, {RHS, LHS});
1862 Cmp2.constrainAllUses(TII, TRI, RBI);
1863 if (NeedInvert)
1864 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1865 auto Or =
1866 MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1867 Or.constrainAllUses(TII, TRI, RBI);
1868 } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1869 // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
1870 // FIXME: If LHS and RHS are the same we can use a single FEQ.
1871 NeedInvert = Pred == CmpInst::FCMP_UNO;
1873 {&RISCV::GPRRegClass}, {LHS, LHS});
1874 Cmp1.constrainAllUses(TII, TRI, RBI);
1876 {&RISCV::GPRRegClass}, {RHS, RHS});
1877 Cmp2.constrainAllUses(TII, TRI, RBI);
1878 if (NeedInvert)
1879 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1880 auto And =
1881 MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1882 And.constrainAllUses(TII, TRI, RBI);
1883 } else
1884 llvm_unreachable("Unhandled predicate");
1885
1886 // Emit an XORI to invert the result if needed.
1887 if (NeedInvert) {
1888 auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1889 Xor.constrainAllUses(TII, TRI, RBI);
1890 }
1891
1892 MI.eraseFromParent();
1893 return true;
1894}
1895
// Emit the machine instruction(s) implementing a G_FENCE with the given
// ordering and synchronization scope.
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // The only fence that needs an instruction is a sequentially-consistent
    // cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // fence rw, rw
      // NOTE(review): the addImm operand lines of this buildInstr were lost
      // in this rendering (presumably two .addImm(RISCVFenceField::R |
      // RISCVFenceField::W) calls) — restore from upstream.
      MIB.buildInstr(RISCV::FENCE, {}, {})
      return;
    }

    // MEMBARRIER is a compiler barrier; it codegens to a no-op.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // singlethread fences only synchronize with signal handlers on the same
  // thread and thus only need to preserve instruction order, not actually
  // enforce memory ordering.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
  // Manual: Volume I.
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
    return;
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    Pred = RISCVFenceField::R;
    // NOTE(review): the Succ assignment was lost in this rendering
    // (presumably Succ = RISCVFenceField::R | RISCVFenceField::W;).
    break;
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    // NOTE(review): the Pred assignment was lost in this rendering
    // (presumably Pred = RISCVFenceField::R | RISCVFenceField::W;).
    Succ = RISCVFenceField::W;
    break;
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    // NOTE(review): both the Pred and Succ assignments were lost in this
    // rendering (presumably rw for each) — restore from upstream.
    break;
  }
  MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
}
1952
namespace llvm {
// Factory entry point used by the RISC-V target to construct the GlobalISel
// instruction selector.
InstructionSelector *
// NOTE(review): the function-name line was lost in this rendering
// (presumably createRISCVInstructionSelector(const RISCVTargetMachine &TM,)
// — restore from upstream before building.
    const RISCVSubtarget &Subtarget,
    const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}
} // end namespace llvm
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Provides analysis for querying information about KnownBits during GISel passes.
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1404
bool isPosZero() const
Definition APFloat.h:1523
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1503
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:174
This is an important base class in LLVM.
Definition Constant.h:43
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
MachineInstrBuilder buildPtrToInt(const DstOp &Dst, const SrcOp &Src)
Build and insert a G_PTRTOINT instruction.
void constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Analysis providing profile information.
This class provides the information for the target register banks.
unsigned getXLen() const
std::optional< unsigned > getRealVLen() const
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
This class implements the register bank concept.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:165
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
operand_type_match m_Reg()
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
Definition RISCVMatInt.h:43
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:293
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:303
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:314
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:202
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:258
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:433
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:77
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define MORE()
Definition regcomp.c:246
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.