LLVM 23.0.0git
RISCVInstructionSelector.cpp
Go to the documentation of this file.
1//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the targeting of the InstructionSelector class for
10/// RISC-V.
11/// \todo This should be generated by TableGen.
12//===----------------------------------------------------------------------===//
13
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
27
28#define DEBUG_TYPE "riscv-isel"
29
30using namespace llvm;
31using namespace MIPatternMatch;
32
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
36
37namespace {
38
39class RISCVInstructionSelector : public InstructionSelector {
40public:
41 RISCVInstructionSelector(const RISCVTargetMachine &TM,
42 const RISCVSubtarget &STI,
43 const RISCVRegisterBankInfo &RBI);
44
45 bool select(MachineInstr &MI) override;
46
47 void setupMF(MachineFunction &MF, GISelValueTracking *VT,
48 CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
49 BlockFrequencyInfo *BFI) override {
50 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
51 MRI = &MF.getRegInfo();
52 }
53
54 static const char *getName() { return DEBUG_TYPE; }
55
56private:
57 static constexpr unsigned MaxRecursionDepth = 6;
58
59 bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
60 const unsigned Depth = 0) const;
61 bool hasAllHUsers(const MachineInstr &MI) const {
62 return hasAllNBitUsers(MI, 16);
63 }
64 bool hasAllWUsers(const MachineInstr &MI) const {
65 return hasAllNBitUsers(MI, 32);
66 }
67
68 bool isRegInGprb(Register Reg) const;
69 bool isRegInFprb(Register Reg) const;
70
71 // tblgen-erated 'select' implementation, used as the initial selector for
72 // the patterns that don't require complex C++.
73 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
74
75 // A lowering phase that runs before any selection attempts.
76 // Returns true if the instruction was modified.
77 void preISelLower(MachineInstr &MI);
78
79 bool replacePtrWithInt(MachineOperand &Op);
80
81 // Custom selection methods
82 bool selectCopy(MachineInstr &MI) const;
83 bool selectImplicitDef(MachineInstr &MI) const;
84 bool materializeImm(Register Reg, int64_t Imm, MachineInstr &MI) const;
85 bool selectAddr(MachineInstr &MI, bool IsLocal = true,
86 bool IsExternWeak = false) const;
87 bool selectSelect(MachineInstr &MI) const;
88 bool selectFPCompare(MachineInstr &MI) const;
89 void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
90 MachineInstr &MI) const;
92 void addVectorLoadStoreOperands(MachineInstr &I,
94 unsigned &CurOp, bool IsMasked,
95 bool IsStridedOrIndexed,
96 LLT *IndexVT = nullptr) const;
97 bool selectIntrinsicWithSideEffects(MachineInstr &I) const;
98 bool selectIntrinsic(MachineInstr &I) const;
99 bool selectExtractSubvector(MachineInstr &MI) const;
100 bool selectInsertSubVector(MachineInstr &I) const;
101 ComplexRendererFns selectShiftMask(MachineOperand &Root,
102 unsigned ShiftWidth) const;
103 ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
104 return selectShiftMask(Root, STI.getXLen());
105 }
106 ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
107 return selectShiftMask(Root, 32);
108 }
109 ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;
110
111 ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
112 template <unsigned Bits>
113 ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
114 return selectSExtBits(Root, Bits);
115 }
116
117 ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
118 template <unsigned Bits>
119 ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
120 return selectZExtBits(Root, Bits);
121 }
122
123 ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
124 template <unsigned ShAmt>
125 ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
126 return selectSHXADDOp(Root, ShAmt);
127 }
128
129 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
130 unsigned ShAmt) const;
131 template <unsigned ShAmt>
132 ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
133 return selectSHXADD_UWOp(Root, ShAmt);
134 }
135
136 ComplexRendererFns renderVLOp(MachineOperand &Root) const;
137
138 // Custom renderers for tablegen
139 void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
140 int OpIdx) const;
141 void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
142 int OpIdx) const;
143 void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
144 int OpIdx) const;
145 void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
146 int OpIdx) const;
147 void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
148 int OpIdx) const;
149
150 void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
151 int OpIdx) const;
152 void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
153 const MachineInstr &MI, int OpIdx) const;
154
155 void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
156 int OpIdx) const;
157 void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
158 int OpIdx) const;
159
160 const RISCVSubtarget &STI;
161 const RISCVInstrInfo &TII;
162 const RISCVRegisterInfo &TRI;
163 const RISCVRegisterBankInfo &RBI;
164 const RISCVTargetMachine &TM;
165
166 MachineRegisterInfo *MRI = nullptr;
167
168 // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
169 // uses "STI." in the code generated by TableGen. We need to unify the name of
170 // Subtarget variable.
171 const RISCVSubtarget *Subtarget = &STI;
172
173#define GET_GLOBALISEL_PREDICATES_DECL
174#include "RISCVGenGlobalISel.inc"
175#undef GET_GLOBALISEL_PREDICATES_DECL
176
177#define GET_GLOBALISEL_TEMPORARIES_DECL
178#include "RISCVGenGlobalISel.inc"
179#undef GET_GLOBALISEL_TEMPORARIES_DECL
180};
181
182} // end anonymous namespace
183
184#define GET_GLOBALISEL_IMPL
185#include "RISCVGenGlobalISel.inc"
186#undef GET_GLOBALISEL_IMPL
187
188RISCVInstructionSelector::RISCVInstructionSelector(
189 const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
190 const RISCVRegisterBankInfo &RBI)
191 : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
192 TM(TM),
193
195#include "RISCVGenGlobalISel.inc"
198#include "RISCVGenGlobalISel.inc"
200{
201}
202
// Mimics optimizations in ISel and RISCVOptWInst Pass
/// Return true if every (non-debug) user of \p MI's result only demands the
/// low \p Bits bits of the value. Walks the use chains recursively (through
/// AND/OR/XOR/ANDI and SRLI) up to MaxRecursionDepth. \p Depth is 0 on the
/// external call; non-zero only on the internal recursive calls, where the
/// opcode assertion below is relaxed.
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {

  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  // Give up (conservatively report "not all N-bit users") once the walk gets
  // too deep.
  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    unsigned OpIdx = UserOp.getOperandNo();

    switch (UserMI.getOpcode()) {
    default:
      // Unknown user: must assume all bits are demanded.
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      // W-form instructions only read the low 32 bits of their inputs.
      if (Bits >= 32)
        break;
      return false;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
      // Shift amount operands only use log2(Xlen) bits.
      if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
        break;
      return false;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
        break;
      return false;
    case RISCV::ANDI:
      // ANDI demands only up to the mask's top set bit; otherwise fall through
      // to the recursive check shared with AND/OR/XOR.
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
                      (uint64_t)UserMI.getOperand(2).getImm()))
        break;
      goto RecCheck;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    // Bitwise ops pass bits through unchanged, so recurse into their users.
    RecCheck:
      if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = UserMI.getOperand(2).getImm();
      // If we are shifting right by less than Bits, and users don't demand any
      // bits that were shifted into [Bits-1:0], then we can consider this as an
      // N-Bit user.
      if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    }
  }

  return true;
}
277
278InstructionSelector::ComplexRendererFns
279RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
280 unsigned ShiftWidth) const {
281 if (!Root.isReg())
282 return std::nullopt;
283
284 using namespace llvm::MIPatternMatch;
285
286 Register ShAmtReg = Root.getReg();
287 // Peek through zext.
288 Register ZExtSrcReg;
289 if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
290 ShAmtReg = ZExtSrcReg;
291
292 APInt AndMask;
293 Register AndSrcReg;
294 // Try to combine the following pattern (applicable to other shift
295 // instructions as well as 32-bit ones):
296 //
297 // %4:gprb(s64) = G_AND %3, %2
298 // %5:gprb(s64) = G_LSHR %1, %4(s64)
299 //
300 // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore other bits than
301 // the lowest log2(XLEN) bits of register rs2. As for the above pattern, if
302 // the lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
303 // then it can be eliminated. Given register rs1 or rs2 holding a constant
304 // (the and mask), there are two cases G_AND can be erased:
305 //
306 // 1. the lowest log2(XLEN) bits of the and mask are all set
307 // 2. the bits of the register being masked are already unset (zero set)
308 if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
309 APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
310 if (ShMask.isSubsetOf(AndMask)) {
311 ShAmtReg = AndSrcReg;
312 } else {
313 // SimplifyDemandedBits may have optimized the mask so try restoring any
314 // bits that are known zero.
315 KnownBits Known = VT->getKnownBits(AndSrcReg);
316 if (ShMask.isSubsetOf(AndMask | Known.Zero))
317 ShAmtReg = AndSrcReg;
318 }
319 }
320
321 APInt Imm;
323 if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
324 if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
325 // If we are shifting by X+N where N == 0 mod Size, then just shift by X
326 // to avoid the ADD.
327 ShAmtReg = Reg;
328 } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
329 if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
330 // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
331 // to generate a NEG instead of a SUB of a constant.
332 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
333 unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
334 return {{[=](MachineInstrBuilder &MIB) {
335 MachineIRBuilder(*MIB.getInstr())
336 .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
337 MIB.addReg(ShAmtReg);
338 }}};
339 }
340 if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
341 // If we are shifting by N-X where N == -1 mod Size, then just shift by ~X
342 // to generate a NOT instead of a SUB of a constant.
343 ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
344 return {{[=](MachineInstrBuilder &MIB) {
345 MachineIRBuilder(*MIB.getInstr())
346 .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
347 .addImm(-1);
348 MIB.addReg(ShAmtReg);
349 }}};
350 }
351 }
352
353 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
354}
355
356InstructionSelector::ComplexRendererFns
357RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
358 unsigned Bits) const {
359 if (!Root.isReg())
360 return std::nullopt;
361 Register RootReg = Root.getReg();
362 MachineInstr *RootDef = MRI->getVRegDef(RootReg);
363
364 if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
365 RootDef->getOperand(2).getImm() == Bits) {
366 return {
367 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
368 }
369
370 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
371 if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
372 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
373
374 return std::nullopt;
375}
376
377InstructionSelector::ComplexRendererFns
378RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
379 unsigned Bits) const {
380 if (!Root.isReg())
381 return std::nullopt;
382 Register RootReg = Root.getReg();
383
384 Register RegX;
385 uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
386 if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
387 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
388 }
389
390 if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
391 MRI->getType(RegX).getScalarSizeInBits() == Bits)
392 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
393
394 unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
395 if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
396 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
397
398 return std::nullopt;
399}
400
401InstructionSelector::ComplexRendererFns
402RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
403 unsigned ShAmt) const {
404 using namespace llvm::MIPatternMatch;
405
406 if (!Root.isReg())
407 return std::nullopt;
408 Register RootReg = Root.getReg();
409
410 const unsigned XLen = STI.getXLen();
411 APInt Mask, C2;
412 Register RegY;
413 std::optional<bool> LeftShift;
414 // (and (shl y, c2), mask)
415 if (mi_match(RootReg, *MRI,
416 m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
417 LeftShift = true;
418 // (and (lshr y, c2), mask)
419 else if (mi_match(RootReg, *MRI,
420 m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
421 LeftShift = false;
422
423 if (LeftShift.has_value()) {
424 if (*LeftShift)
426 else
428
429 if (Mask.isShiftedMask()) {
430 unsigned Leading = XLen - Mask.getActiveBits();
431 unsigned Trailing = Mask.countr_zero();
432 // Given (and (shl y, c2), mask) in which mask has no leading zeros and
433 // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
434 if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
435 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
436 return {{[=](MachineInstrBuilder &MIB) {
437 MachineIRBuilder(*MIB.getInstr())
438 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
439 .addImm(Trailing - C2.getLimitedValue());
440 MIB.addReg(DstReg);
441 }}};
442 }
443
444 // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
445 // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
446 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
447 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
448 return {{[=](MachineInstrBuilder &MIB) {
449 MachineIRBuilder(*MIB.getInstr())
450 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
451 .addImm(Leading + Trailing);
452 MIB.addReg(DstReg);
453 }}};
454 }
455 }
456 }
457
458 LeftShift.reset();
459
460 // (shl (and y, mask), c2)
461 if (mi_match(RootReg, *MRI,
462 m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
463 m_ICst(C2))))
464 LeftShift = true;
465 // (lshr (and y, mask), c2)
466 else if (mi_match(RootReg, *MRI,
468 m_ICst(C2))))
469 LeftShift = false;
470
471 if (LeftShift.has_value() && Mask.isShiftedMask()) {
472 unsigned Leading = XLen - Mask.getActiveBits();
473 unsigned Trailing = Mask.countr_zero();
474
475 // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
476 // c3 trailing zeros. If c1 + c3 == ShAmt, we can emit SRLIW + SHXADD.
477 bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
478 (Trailing + C2.getLimitedValue()) == ShAmt;
479 if (!Cond)
480 // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
481 // c3 trailing zeros. If c3 - c1 == ShAmt, we can emit SRLIW + SHXADD.
482 Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
483 (Trailing - C2.getLimitedValue()) == ShAmt;
484
485 if (Cond) {
486 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
487 return {{[=](MachineInstrBuilder &MIB) {
488 MachineIRBuilder(*MIB.getInstr())
489 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
490 .addImm(Trailing);
491 MIB.addReg(DstReg);
492 }}};
493 }
494 }
495
496 return std::nullopt;
497}
498
499InstructionSelector::ComplexRendererFns
500RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
501 unsigned ShAmt) const {
502 using namespace llvm::MIPatternMatch;
503
504 if (!Root.isReg())
505 return std::nullopt;
506 Register RootReg = Root.getReg();
507
508 // Given (and (shl x, c2), mask) in which mask is a shifted mask with
509 // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
510 // c2 - ShAmt followed by SHXADD_UW with ShAmt for x amount.
511 APInt Mask, C2;
512 Register RegX;
513 if (mi_match(
514 RootReg, *MRI,
516 m_ICst(Mask))))) {
518
519 if (Mask.isShiftedMask()) {
520 unsigned Leading = Mask.countl_zero();
521 unsigned Trailing = Mask.countr_zero();
522 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
523 Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
524 return {{[=](MachineInstrBuilder &MIB) {
525 MachineIRBuilder(*MIB.getInstr())
526 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
527 .addImm(C2.getLimitedValue() - ShAmt);
528 MIB.addReg(DstReg);
529 }}};
530 }
531 }
532 }
533
534 return std::nullopt;
535}
536
537InstructionSelector::ComplexRendererFns
538RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
539 assert(Root.isReg() && "Expected operand to be a Register");
540 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
541
542 if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
543 auto C = RootDef->getOperand(1).getCImm();
544 if (C->getValue().isAllOnes())
545 // If the operand is a G_CONSTANT with value of all ones it is larger than
546 // VLMAX. We convert it to an immediate with value VLMaxSentinel. This is
547 // recognized specially by the vsetvli insertion pass.
548 return {{[=](MachineInstrBuilder &MIB) {
549 MIB.addImm(RISCV::VLMaxSentinel);
550 }}};
551
552 if (isUInt<5>(C->getZExtValue())) {
553 uint64_t ZExtC = C->getZExtValue();
554 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
555 }
556 }
557 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
558}
559
560InstructionSelector::ComplexRendererFns
561RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
562 if (!Root.isReg())
563 return std::nullopt;
564
565 MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
566 if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
567 return {{
568 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
569 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
570 }};
571 }
572
573 if (isBaseWithConstantOffset(Root, *MRI)) {
574 MachineOperand &LHS = RootDef->getOperand(1);
575 MachineOperand &RHS = RootDef->getOperand(2);
576 MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
577 MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
578
579 int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
580 if (isInt<12>(RHSC)) {
581 if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
582 return {{
583 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
584 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
585 }};
586
587 return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
588 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
589 }
590 }
591
592 // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
593 // the combiner?
594 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
595 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
596}
597
598/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate CC.
599/// CC Must be an ICMP Predicate.
600static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
601 switch (CC) {
602 default:
603 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
604 case CmpInst::Predicate::ICMP_EQ:
605 return RISCVCC::COND_EQ;
606 case CmpInst::Predicate::ICMP_NE:
607 return RISCVCC::COND_NE;
608 case CmpInst::Predicate::ICMP_ULT:
609 return RISCVCC::COND_LTU;
610 case CmpInst::Predicate::ICMP_SLT:
611 return RISCVCC::COND_LT;
612 case CmpInst::Predicate::ICMP_UGE:
613 return RISCVCC::COND_GEU;
614 case CmpInst::Predicate::ICMP_SGE:
615 return RISCVCC::COND_GE;
616 }
617}
618
621 MachineRegisterInfo &MRI) {
622 // Try to fold an ICmp. If that fails, use a NE compare with X0.
624 if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
625 LHS = CondReg;
626 RHS = RISCV::X0;
627 CC = RISCVCC::COND_NE;
628 return;
629 }
630
631 // We found an ICmp, do some canonicalization.
632
633 // Adjust comparisons to use comparison with 0 if possible.
634 if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
635 switch (Pred) {
637 // Convert X > -1 to X >= 0
638 if (*Constant == -1) {
639 CC = RISCVCC::COND_GE;
640 RHS = RISCV::X0;
641 return;
642 }
643 break;
645 // Convert X < 1 to 0 >= X
646 if (*Constant == 1) {
647 CC = RISCVCC::COND_GE;
648 RHS = LHS;
649 LHS = RISCV::X0;
650 return;
651 }
652 break;
653 default:
654 break;
655 }
656 }
657
658 switch (Pred) {
659 default:
660 llvm_unreachable("Expected ICMP CmpInst::Predicate.");
667 // These CCs are supported directly by RISC-V branches.
668 break;
673 // These CCs are not supported directly by RISC-V branches, but changing the
674 // direction of the CC and swapping LHS and RHS are.
675 Pred = CmpInst::getSwappedPredicate(Pred);
676 std::swap(LHS, RHS);
677 break;
678 }
679
680 CC = getRISCVCCFromICmp(Pred);
681}
682
683/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
684/// \p GenericOpc, appropriate for the GPR register bank and of memory access
685/// size \p OpSize.
686static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
687 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
688 switch (OpSize) {
689 default:
690 llvm_unreachable("Unexpected memory size");
691 case 8:
692 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
693 case 16:
694 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
695 case 32:
696 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
697 case 64:
698 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
699 }
700}
701
702/// Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation
703/// \p GenericOpc, appropriate for the GPR register bank and of memory access
704/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
705static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
706 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
707 switch (OpSize) {
708 case 8:
709 // Prefer unsigned due to no c.lb in Zcb.
710 return IsStore ? RISCV::SB : RISCV::LBU;
711 case 16:
712 return IsStore ? RISCV::SH : RISCV::LH;
713 case 32:
714 return IsStore ? RISCV::SW : RISCV::LW;
715 case 64:
716 return IsStore ? RISCV::SD : RISCV::LD;
717 }
718
719 return GenericOpc;
720}
721
722void RISCVInstructionSelector::addVectorLoadStoreOperands(
723 MachineInstr &I, SmallVectorImpl<Register> &SrcOps, unsigned &CurOp,
724 bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
725 // Base Pointer
726 auto PtrReg = I.getOperand(CurOp++).getReg();
727 SrcOps.push_back(PtrReg);
728
729 // Stride or Index
730 if (IsStridedOrIndexed) {
731 auto StrideReg = I.getOperand(CurOp++).getReg();
732 SrcOps.push_back(StrideReg);
733 if (IndexVT)
734 *IndexVT = MRI->getType(StrideReg);
735 }
736
737 // Mask
738 if (IsMasked) {
739 auto MaskReg = I.getOperand(CurOp++).getReg();
740 SrcOps.push_back(MaskReg);
741 }
742}
743
744bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
745 MachineInstr &I) const {
746 // Find the intrinsic ID.
747 unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
748 // Select the instruction.
749 switch (IntrinID) {
750 default:
751 return false;
752 case Intrinsic::riscv_vlm:
753 case Intrinsic::riscv_vle:
754 case Intrinsic::riscv_vle_mask:
755 case Intrinsic::riscv_vlse:
756 case Intrinsic::riscv_vlse_mask: {
757 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
758 IntrinID == Intrinsic::riscv_vlse_mask;
759 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
760 IntrinID == Intrinsic::riscv_vlse_mask;
761 LLT VT = MRI->getType(I.getOperand(0).getReg());
762 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
763
764 // Result vector
765 const Register DstReg = I.getOperand(0).getReg();
766
767 // Sources
768 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
769 unsigned CurOp = 2;
770 SmallVector<Register, 4> SrcOps; // Source registers.
771
772 // Passthru
773 if (HasPassthruOperand) {
774 auto PassthruReg = I.getOperand(CurOp++).getReg();
775 SrcOps.push_back(PassthruReg);
776 } else {
777 SrcOps.push_back(Register(RISCV::NoRegister));
778 }
779
780 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
781
783 const RISCV::VLEPseudo *P =
784 RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
785 static_cast<unsigned>(LMUL));
786
787 MachineInstrBuilder PseudoMI =
788 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo), DstReg);
789 for (Register Reg : SrcOps)
790 PseudoMI.addReg(Reg);
791
792 // Select VL
793 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
794 for (auto &RenderFn : *VLOpFn)
795 RenderFn(PseudoMI);
796
797 // SEW
798 PseudoMI.addImm(Log2SEW);
799
800 // Policy
801 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
802 if (IsMasked)
803 Policy = I.getOperand(CurOp++).getImm();
804 PseudoMI.addImm(Policy);
805
806 // Memref
807 PseudoMI.cloneMemRefs(I);
808
809 I.eraseFromParent();
810 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
811 return true;
812 }
813 case Intrinsic::riscv_vloxei:
814 case Intrinsic::riscv_vloxei_mask:
815 case Intrinsic::riscv_vluxei:
816 case Intrinsic::riscv_vluxei_mask: {
817 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
818 IntrinID == Intrinsic::riscv_vluxei_mask;
819 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
820 IntrinID == Intrinsic::riscv_vloxei_mask;
821 LLT VT = MRI->getType(I.getOperand(0).getReg());
822 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
823
824 // Result vector
825 const Register DstReg = I.getOperand(0).getReg();
826
827 // Sources
828 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
829 unsigned CurOp = 2;
830 SmallVector<Register, 4> SrcOps; // Source registers.
831
832 // Passthru
833 if (HasPassthruOperand) {
834 auto PassthruReg = I.getOperand(CurOp++).getReg();
835 SrcOps.push_back(PassthruReg);
836 } else {
837 // Use NoRegister if there is no specified passthru.
838 SrcOps.push_back(Register());
839 }
840 LLT IndexVT;
841 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
842
844 RISCVVType::VLMUL IndexLMUL =
846 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
847 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
848 reportFatalUsageError("The V extension does not support EEW=64 for index "
849 "values when XLEN=32");
850 }
851 const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
852 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
853 static_cast<unsigned>(IndexLMUL));
854
855 MachineInstrBuilder PseudoMI =
856 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo), DstReg);
857 for (Register Reg : SrcOps)
858 PseudoMI.addReg(Reg);
859
860 // Select VL
861 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
862 for (auto &RenderFn : *VLOpFn)
863 RenderFn(PseudoMI);
864
865 // SEW
866 PseudoMI.addImm(Log2SEW);
867
868 // Policy
869 uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
870 if (IsMasked)
871 Policy = I.getOperand(CurOp++).getImm();
872 PseudoMI.addImm(Policy);
873
874 // Memref
875 PseudoMI.cloneMemRefs(I);
876
877 I.eraseFromParent();
878 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
879 return true;
880 }
881 case Intrinsic::riscv_vsm:
882 case Intrinsic::riscv_vse:
883 case Intrinsic::riscv_vse_mask:
884 case Intrinsic::riscv_vsse:
885 case Intrinsic::riscv_vsse_mask: {
886 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
887 IntrinID == Intrinsic::riscv_vsse_mask;
888 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
889 IntrinID == Intrinsic::riscv_vsse_mask;
890 LLT VT = MRI->getType(I.getOperand(1).getReg());
891 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
892
893 // Sources
894 unsigned CurOp = 1;
895 SmallVector<Register, 4> SrcOps; // Source registers.
896
897 // Store value
898 auto PassthruReg = I.getOperand(CurOp++).getReg();
899 SrcOps.push_back(PassthruReg);
900
901 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);
902
904 const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
905 IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
906
907 MachineInstrBuilder PseudoMI =
908 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo));
909 for (Register Reg : SrcOps)
910 PseudoMI.addReg(Reg);
911
912 // Select VL
913 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
914 for (auto &RenderFn : *VLOpFn)
915 RenderFn(PseudoMI);
916
917 // SEW
918 PseudoMI.addImm(Log2SEW);
919
920 // Memref
921 PseudoMI.cloneMemRefs(I);
922
923 I.eraseFromParent();
924 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
925 return true;
926 }
927 case Intrinsic::riscv_vsoxei:
928 case Intrinsic::riscv_vsoxei_mask:
929 case Intrinsic::riscv_vsuxei:
930 case Intrinsic::riscv_vsuxei_mask: {
931 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
932 IntrinID == Intrinsic::riscv_vsuxei_mask;
933 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
934 IntrinID == Intrinsic::riscv_vsoxei_mask;
935 LLT VT = MRI->getType(I.getOperand(1).getReg());
936 unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
937
938 // Sources
939 unsigned CurOp = 1;
940 SmallVector<Register, 4> SrcOps; // Source registers.
941
942 // Store value
943 auto PassthruReg = I.getOperand(CurOp++).getReg();
944 SrcOps.push_back(PassthruReg);
945
946 LLT IndexVT;
947 addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);
948
950 RISCVVType::VLMUL IndexLMUL =
952 unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
953 if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
954 reportFatalUsageError("The V extension does not support EEW=64 for index "
955 "values when XLEN=32");
956 }
957 const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
958 IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
959 static_cast<unsigned>(IndexLMUL));
960
961 MachineInstrBuilder PseudoMI =
962 BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(P->Pseudo));
963 for (Register Reg : SrcOps)
964 PseudoMI.addReg(Reg);
965
966 // Select VL
967 auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
968 for (auto &RenderFn : *VLOpFn)
969 RenderFn(PseudoMI);
970
971 // SEW
972 PseudoMI.addImm(Log2SEW);
973
974 // Memref
975 PseudoMI.cloneMemRefs(I);
976
977 I.eraseFromParent();
978 constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
979 return true;
980 }
981 }
982}
983
/// Select a G_INTRINSIC (no side effects). Only the vsetvli/vsetvlimax
/// intrinsics are handled here; anything else returns false so selection
/// falls back to the imported TableGen patterns.
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {

    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;

    // vsetvli carries an explicit AVL operand at index 2, so its SEW/LMUL
    // immediates start one operand later than vsetvlimax's.
    unsigned Offset = VLMax ? 2 : 3;
    unsigned SEW = RISCVVType::decodeVSEW(I.getOperand(Offset).getImm() & 0x7);
    RISCVVType::VLMUL VLMul =
        static_cast<RISCVVType::VLMUL>(I.getOperand(Offset + 1).getImm() & 0x7);

    // The vsetvli intrinsics always use tail-agnostic/mask-agnostic policy.
    unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                              /*MaskAgnostic*/ true);

    Register DstReg = I.getOperand(0).getReg();

    Register VLOperand;
    unsigned Opcode = RISCV::PseudoVSETVLI;

    // Check if AVL is a constant that equals VLMAX.
    if (!VLMax) {
      Register AVLReg = I.getOperand(2).getReg();
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // With a known exact VLEN, AVL == VLEN / (SEW/LMUL ratio) is VLMAX.
        if (auto VLEN = Subtarget->getRealVLen()) {
          if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
            VLMax = true;
        }
      }

      // An all-ones AVL constant is the sentinel requesting VLMAX,
      // independent of the actual VLEN.
      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        const auto *C = AVLDef->getOperand(1).getCImm();
        if (C->getValue().isAllOnes())
          VLMax = true;
      }
    }

    if (VLMax) {
      // vsetvli dst, x0, vtype yields VLMAX.
      VLOperand = Register(RISCV::X0);
      Opcode = RISCV::PseudoVSETVLIX0;
    } else {
      Register AVLReg = I.getOperand(2).getReg();
      VLOperand = AVLReg;

      // Check if AVL is a small constant that can use PseudoVSETIVLI.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // PseudoVSETIVLI takes a 5-bit unsigned immediate AVL.
        if (isUInt<5>(AVL)) {
          MachineInstr *PseudoMI =
              BuildMI(*I.getParent(), I, I.getDebugLoc(),
                      TII.get(RISCV::PseudoVSETIVLI), DstReg)
                  .addImm(AVL)
                  .addImm(VTypeI);
          I.eraseFromParent();
          constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
          return true;
        }
      }
    }

    MachineInstr *PseudoMI =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
            .addReg(VLOperand)
            .addImm(VTypeI);
    I.eraseFromParent();
    constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
    return true;
  }
  }
}
1061
1062bool RISCVInstructionSelector::selectExtractSubvector(MachineInstr &MI) const {
1063 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
1064
1065 Register DstReg = MI.getOperand(0).getReg();
1066 Register SrcReg = MI.getOperand(1).getReg();
1067
1068 LLT DstTy = MRI->getType(DstReg);
1069 LLT SrcTy = MRI->getType(SrcReg);
1070
1071 unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());
1072
1073 MVT DstMVT = getMVTForLLT(DstTy);
1074 MVT SrcMVT = getMVTForLLT(SrcTy);
1075
1076 unsigned SubRegIdx;
1077 std::tie(SubRegIdx, Idx) =
1079 SrcMVT, DstMVT, Idx, &TRI);
1080
1081 if (Idx != 0)
1082 return false;
1083
1084 unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
1085 const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
1086 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1087 return false;
1088
1089 unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
1090 const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
1091 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1092 return false;
1093
1094 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(TargetOpcode::COPY),
1095 DstReg)
1096 .addReg(SrcReg, {}, SubRegIdx);
1097
1098 MI.eraseFromParent();
1099 return true;
1100}
1101
1102bool RISCVInstructionSelector::selectInsertSubVector(MachineInstr &MI) const {
1103 assert(MI.getOpcode() == TargetOpcode::G_INSERT_SUBVECTOR);
1104
1105 Register DstReg = MI.getOperand(0).getReg();
1106 Register VecReg = MI.getOperand(1).getReg();
1107 Register SubVecReg = MI.getOperand(2).getReg();
1108
1109 LLT VecTy = MRI->getType(VecReg);
1110 LLT SubVecTy = MRI->getType(SubVecReg);
1111
1112 MVT VecMVT = getMVTForLLT(VecTy);
1113 MVT SubVecMVT = getMVTForLLT(SubVecTy);
1114
1115 unsigned Idx = static_cast<unsigned>(MI.getOperand(3).getImm());
1116
1117 unsigned SubRegIdx;
1118 std::tie(SubRegIdx, Idx) =
1120 VecMVT, SubVecMVT, Idx, &TRI);
1121
1122 // If the Idx hasn't been completely eliminated then this is a subvector
1123 // insert which doesn't naturally align to a vector register. These must
1124 // be handled using instructions to manipulate the vector registers.
1125 if (Idx != 0)
1126 return false;
1127
1128 // Constrain dst
1129 unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VecMVT);
1130 const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
1131 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1132 return false;
1133
1134 // If we haven't set a SubRegIdx, then we must be going between
1135 // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
1136 if (SubRegIdx == RISCV::NoSubRegister) {
1138 DstRegClassID &&
1139 "Unexpected subvector insert");
1140 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(TargetOpcode::COPY),
1141 DstReg)
1142 .addReg(SubVecReg);
1143 MI.eraseFromParent();
1144 return true;
1145 }
1146
1147 // Use INSERT_SUBREG to insert the subvector into the vector at the
1148 // appropriate subregister index.
1149 MachineInstr *Ins = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1150 TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
1151 .addReg(VecReg)
1152 .addReg(SubVecReg)
1153 .addImm(SubRegIdx);
1154
1155 MI.eraseFromParent();
1157 return true;
1158}
1159
1160bool RISCVInstructionSelector::select(MachineInstr &MI) {
1161 preISelLower(MI);
1162 const unsigned Opc = MI.getOpcode();
1163
1164 if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
1165 if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
1166 const Register DefReg = MI.getOperand(0).getReg();
1167 const LLT DefTy = MRI->getType(DefReg);
1168
1169 const RegClassOrRegBank &RegClassOrBank =
1170 MRI->getRegClassOrRegBank(DefReg);
1171
1172 const TargetRegisterClass *DefRC =
1174 if (!DefRC) {
1175 if (!DefTy.isValid()) {
1176 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
1177 return false;
1178 }
1179
1180 const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
1181 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, STI.is64Bit());
1182 if (!DefRC) {
1183 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
1184 return false;
1185 }
1186 }
1187
1188 MI.setDesc(TII.get(TargetOpcode::PHI));
1189 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
1190 }
1191
1192 // Certain non-generic instructions also need some special handling.
1193 if (MI.isCopy())
1194 return selectCopy(MI);
1195
1196 return true;
1197 }
1198
1199 if (selectImpl(MI, *CoverageInfo))
1200 return true;
1201
1202 switch (Opc) {
1203 case TargetOpcode::G_ANYEXT:
1204 case TargetOpcode::G_PTRTOINT:
1205 case TargetOpcode::G_INTTOPTR:
1206 case TargetOpcode::G_TRUNC:
1207 case TargetOpcode::G_FREEZE:
1208 return selectCopy(MI);
1209 case TargetOpcode::G_CONSTANT: {
1210 Register DstReg = MI.getOperand(0).getReg();
1211 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
1212
1213 if (!materializeImm(DstReg, Imm, MI))
1214 return false;
1215
1216 MI.eraseFromParent();
1217 return true;
1218 }
1219 case TargetOpcode::G_ZEXT:
1220 case TargetOpcode::G_SEXT: {
1221 bool IsSigned = Opc != TargetOpcode::G_ZEXT;
1222 Register DstReg = MI.getOperand(0).getReg();
1223 Register SrcReg = MI.getOperand(1).getReg();
1224 LLT SrcTy = MRI->getType(SrcReg);
1225 unsigned SrcSize = SrcTy.getSizeInBits();
1226
1227 if (SrcTy.isVector())
1228 return false; // Should be handled by imported patterns.
1229
1230 assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
1231 RISCV::GPRBRegBankID &&
1232 "Unexpected ext regbank");
1233
1234 // Use addiw SrcReg, 0 (sext.w) for i32.
1235 if (IsSigned && SrcSize == 32) {
1236 MI.setDesc(TII.get(RISCV::ADDIW));
1237 MI.addOperand(MachineOperand::CreateImm(0));
1239 return true;
1240 }
1241
1242 // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
1243 if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
1244 MI.setDesc(TII.get(RISCV::ADD_UW));
1245 MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
1247 return true;
1248 }
1249
1250 // Use sext.h/zext.h for i16 with Zbb.
1251 if (SrcSize == 16 &&
1252 (STI.hasStdExtZbb() || (!IsSigned && STI.hasStdExtZbkb()))) {
1253 MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
1254 : STI.isRV64() ? RISCV::ZEXT_H_RV64
1255 : RISCV::ZEXT_H_RV32));
1257 return true;
1258 }
1259
1260 // Fall back to shift pair.
1261 Register ShiftLeftReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1262 MachineInstr *ShiftLeft = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1263 TII.get(RISCV::SLLI), ShiftLeftReg)
1264 .addReg(SrcReg)
1265 .addImm(STI.getXLen() - SrcSize);
1266 constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
1267 MachineInstr *ShiftRight =
1268 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1269 TII.get(IsSigned ? RISCV::SRAI : RISCV::SRLI), DstReg)
1270 .addReg(ShiftLeftReg)
1271 .addImm(STI.getXLen() - SrcSize);
1272 constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
1273 MI.eraseFromParent();
1274 return true;
1275 }
1276 case TargetOpcode::G_FCONSTANT: {
1277 // TODO: Use constant pool for complex constants.
1278 Register DstReg = MI.getOperand(0).getReg();
1279 const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
1280 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1281 if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
1282 Register GPRReg;
1283 if (FPimm.isPosZero()) {
1284 GPRReg = RISCV::X0;
1285 } else {
1286 GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1287 APInt Imm = FPimm.bitcastToAPInt();
1288 if (!materializeImm(GPRReg, Imm.getSExtValue(), MI))
1289 return false;
1290 }
1291
1292 unsigned Opcode = Size == 64 ? RISCV::FMV_D_X
1293 : Size == 32 ? RISCV::FMV_W_X
1294 : RISCV::FMV_H_X;
1295 MachineInstr *FMV = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1296 TII.get(Opcode), DstReg)
1297 .addReg(GPRReg);
1299 } else {
1300 // s64 on rv32
1301 assert(Size == 64 && !Subtarget->is64Bit() &&
1302 "Unexpected size or subtarget");
1303
1304 if (FPimm.isPosZero()) {
1305 // Optimize +0.0 to use fcvt.d.w
1306 MachineInstr *FCVT = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1307 TII.get(RISCV::FCVT_D_W), DstReg)
1308 .addReg(RISCV::X0)
1311
1312 MI.eraseFromParent();
1313 return true;
1314 }
1315
1316 // Split into two pieces and build through the stack.
1317 Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1318 Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1319 APInt Imm = FPimm.bitcastToAPInt();
1320 if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
1321 MI))
1322 return false;
1323 if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MI))
1324 return false;
1325 MachineInstr *PairF64 =
1326 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1327 TII.get(RISCV::BuildPairF64Pseudo), DstReg)
1328 .addReg(GPRRegLow)
1329 .addReg(GPRRegHigh);
1330 constrainSelectedInstRegOperands(*PairF64, TII, TRI, RBI);
1331 }
1332
1333 MI.eraseFromParent();
1334 return true;
1335 }
1336 case TargetOpcode::G_GLOBAL_VALUE: {
1337 auto *GV = MI.getOperand(1).getGlobal();
1338 if (GV->isThreadLocal()) {
1339 // TODO: implement this case.
1340 return false;
1341 }
1342
1343 return selectAddr(MI, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
1344 }
1345 case TargetOpcode::G_JUMP_TABLE:
1346 case TargetOpcode::G_CONSTANT_POOL:
1347 return selectAddr(MI);
1348 case TargetOpcode::G_BRCOND: {
1349 Register LHS, RHS;
1351 getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);
1352
1353 MachineInstr *Bcc = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1354 TII.get(RISCVCC::getBrCond(CC)))
1355 .addReg(LHS)
1356 .addReg(RHS)
1357 .addMBB(MI.getOperand(1).getMBB());
1358 MI.eraseFromParent();
1360 return true;
1361 }
1362 case TargetOpcode::G_BRINDIRECT:
1363 MI.setDesc(TII.get(RISCV::PseudoBRIND));
1364 MI.addOperand(MachineOperand::CreateImm(0));
1366 return true;
1367 case TargetOpcode::G_SELECT:
1368 return selectSelect(MI);
1369 case TargetOpcode::G_FCMP:
1370 return selectFPCompare(MI);
1371 case TargetOpcode::G_FENCE: {
1372 AtomicOrdering FenceOrdering =
1373 static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
1374 SyncScope::ID FenceSSID =
1375 static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
1376 emitFence(FenceOrdering, FenceSSID, MI);
1377 MI.eraseFromParent();
1378 return true;
1379 }
1380 case TargetOpcode::G_IMPLICIT_DEF:
1381 return selectImplicitDef(MI);
1382 case TargetOpcode::G_UNMERGE_VALUES:
1383 return selectUnmergeValues(MI);
1384 case TargetOpcode::G_LOAD:
1385 case TargetOpcode::G_STORE: {
1386 GLoadStore &LdSt = cast<GLoadStore>(MI);
1387 const Register ValReg = LdSt.getReg(0);
1388 const Register PtrReg = LdSt.getPointerReg();
1389 LLT PtrTy = MRI->getType(PtrReg);
1390
1391 const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
1392 if (RB.getID() != RISCV::GPRBRegBankID)
1393 return false;
1394
1395#ifndef NDEBUG
1396 const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
1397 // Check that the pointer register is valid.
1398 assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
1399 "Load/Store pointer operand isn't a GPR");
1400 assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
1401#endif
1402
1403 // Can only handle AddressSpace 0.
1404 if (PtrTy.getAddressSpace() != 0)
1405 return false;
1406
1407 unsigned MemSize = LdSt.getMemSizeInBits().getValue();
1408 AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();
1409
1410 if (isStrongerThanMonotonic(Order)) {
1411 MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
1413 return true;
1414 }
1415
1416 const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
1417 if (NewOpc == MI.getOpcode())
1418 return false;
1419
1420 // Check if we can fold anything into the addressing mode.
1421 auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
1422 if (!AddrModeFns)
1423 return false;
1424
1425 // Folded something. Create a new instruction and return it.
1426 MachineInstrBuilder NewInst =
1427 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(NewOpc));
1428 NewInst.setMIFlags(MI.getFlags());
1429 if (isa<GStore>(MI))
1430 NewInst.addUse(ValReg);
1431 else
1432 NewInst.addDef(ValReg);
1433 NewInst.cloneMemRefs(MI);
1434 for (auto &Fn : *AddrModeFns)
1435 Fn(NewInst);
1436 MI.eraseFromParent();
1437
1438 constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
1439 return true;
1440 }
1441 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1442 return selectIntrinsicWithSideEffects(MI);
1443 case TargetOpcode::G_INTRINSIC:
1444 return selectIntrinsic(MI);
1445 case TargetOpcode::G_EXTRACT_SUBVECTOR:
1446 return selectExtractSubvector(MI);
1447 case TargetOpcode::G_INSERT_SUBVECTOR:
1448 return selectInsertSubVector(MI);
1449 default:
1450 return false;
1451 }
1452}
1453
1454bool RISCVInstructionSelector::selectUnmergeValues(MachineInstr &MI) const {
1455 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1456
1457 if (!Subtarget->hasStdExtZfa())
1458 return false;
1459
1460 // Split F64 Src into two s32 parts
1461 if (MI.getNumOperands() != 3)
1462 return false;
1463 Register Src = MI.getOperand(2).getReg();
1464 Register Lo = MI.getOperand(0).getReg();
1465 Register Hi = MI.getOperand(1).getReg();
1466 if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
1467 return false;
1468
1469 MachineInstr *ExtractLo = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1470 TII.get(RISCV::FMV_X_W_FPR64), Lo)
1471 .addReg(Src);
1472 constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI);
1473
1474 MachineInstr *ExtractHi = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1475 TII.get(RISCV::FMVH_X_D), Hi)
1476 .addReg(Src);
1477 constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI);
1478
1479 MI.eraseFromParent();
1480 return true;
1481}
1482
1483bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op) {
1484 Register PtrReg = Op.getReg();
1485 assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
1486
1487 const LLT sXLen = LLT::scalar(STI.getXLen());
1488 MachineInstr &ParentMI = *Op.getParent();
1489 Register IntReg = MRI->createGenericVirtualRegister(sXLen);
1490 MRI->setRegBank(IntReg, RBI.getRegBank(RISCV::GPRBRegBankID));
1491 MachineInstr *PtrToInt =
1492 BuildMI(*ParentMI.getParent(), ParentMI, ParentMI.getDebugLoc(),
1493 TII.get(TargetOpcode::G_PTRTOINT), IntReg)
1494 .addReg(PtrReg);
1495 Op.setReg(IntReg);
1496 return select(*PtrToInt);
1497}
1498
1499void RISCVInstructionSelector::preISelLower(MachineInstr &MI) {
1500 switch (MI.getOpcode()) {
1501 case TargetOpcode::G_PTR_ADD: {
1502 Register DstReg = MI.getOperand(0).getReg();
1503 const LLT sXLen = LLT::scalar(STI.getXLen());
1504
1505 replacePtrWithInt(MI.getOperand(1));
1506 MI.setDesc(TII.get(TargetOpcode::G_ADD));
1507 MRI->setType(DstReg, sXLen);
1508 break;
1509 }
1510 case TargetOpcode::G_PTRMASK: {
1511 Register DstReg = MI.getOperand(0).getReg();
1512 const LLT sXLen = LLT::scalar(STI.getXLen());
1513 replacePtrWithInt(MI.getOperand(1));
1514 MI.setDesc(TII.get(TargetOpcode::G_AND));
1515 MRI->setType(DstReg, sXLen);
1516 break;
1517 }
1518 }
1519}
1520
1521void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1522 const MachineInstr &MI,
1523 int OpIdx) const {
1524 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1525 "Expected G_CONSTANT");
1526 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1527 MIB.addImm(-CstVal);
1528}
1529
1530void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1531 const MachineInstr &MI,
1532 int OpIdx) const {
1533 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1534 "Expected G_CONSTANT");
1535 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1536 MIB.addImm(STI.getXLen() - CstVal);
1537}
1538
1539void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1540 const MachineInstr &MI,
1541 int OpIdx) const {
1542 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1543 "Expected G_CONSTANT");
1544 uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
1545 MIB.addImm(32 - CstVal);
1546}
1547
1548void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1549 const MachineInstr &MI,
1550 int OpIdx) const {
1551 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1552 "Expected G_CONSTANT");
1553 int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
1554 MIB.addImm(CstVal + 1);
1555}
1556
/// Forward a G_FRAME_INDEX's frame-index operand to the instruction being
/// built, unchanged.
void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}
1564
1565void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
1566 const MachineInstr &MI,
1567 int OpIdx) const {
1568 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1569 "Expected G_CONSTANT");
1570 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
1572}
1573
1574void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1575 MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
1576 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1577 "Expected G_CONSTANT");
1578 uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
1579 MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
1580}
1581
1582void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1583 const MachineInstr &MI,
1584 int OpIdx) const {
1585 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1586 "Expected G_CONSTANT");
1587 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
1588 int64_t Adj = Imm < 0 ? -2048 : 2047;
1589 MIB.addImm(Imm - Adj);
1590}
1591
1592void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1593 const MachineInstr &MI,
1594 int OpIdx) const {
1595 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1596 "Expected G_CONSTANT");
1597 int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1598 MIB.addImm(Imm);
1599}
1600
1601bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
1602 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
1603}
1604
1605bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
1606 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
1607}
1608
1609bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
1610 MachineOperand Dst = MI.getOperand(0);
1611 Register DstReg = MI.getOperand(0).getReg();
1612
1613 if (DstReg.isPhysical())
1614 return true;
1615
1616 const TargetRegisterClass *DstRC =
1617 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1618
1619 assert(DstRC &&
1620 "Register class not available for LLT, register bank combination");
1621
1622 // No need to constrain SrcReg. It will get constrained when
1623 // we hit another of its uses or its defs.
1624 // Copies do not have constraints.
1625 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1626 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
1627 << " operand\n");
1628 return false;
1629 }
1630
1631 MI.setDesc(TII.get(RISCV::COPY));
1632 return true;
1633}
1634
/// Select G_IMPLICIT_DEF as a target IMPLICIT_DEF, constraining the result
/// register to the class implied by its type and register bank.
bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI), STI.is64Bit());

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // NOTE(review): a failed constraint only emits a debug message here and
  // selection still reports success — confirm this is intentional (selectCopy
  // returns false in the analogous situation).
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
1652
1653bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
1654 MachineInstr &MI) const {
1655 MachineBasicBlock &MBB = *MI.getParent();
1656 DebugLoc DL = MI.getDebugLoc();
1657
1658 if (Imm == 0) {
1659 BuildMI(MBB, MI, DL, TII.get(TargetOpcode::COPY), DstReg).addReg(RISCV::X0);
1660 RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
1661 return true;
1662 }
1663
1665 unsigned NumInsts = Seq.size();
1666 Register SrcReg = RISCV::X0;
1667
1668 for (unsigned i = 0; i < NumInsts; i++) {
1669 Register TmpReg = i < NumInsts - 1
1670 ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
1671 : DstReg;
1672 const RISCVMatInt::Inst &I = Seq[i];
1673 MachineInstr *Result;
1674
1675 switch (I.getOpndKind()) {
1676 case RISCVMatInt::Imm:
1677 // clang-format off
1678 Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
1679 .addImm(I.getImm());
1680 // clang-format on
1681 break;
1682 case RISCVMatInt::RegX0:
1683 Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
1684 .addReg(SrcReg)
1685 .addReg(RISCV::X0);
1686 break;
1688 Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
1689 .addReg(SrcReg)
1690 .addReg(SrcReg);
1691 break;
1693 Result = BuildMI(MBB, MI, DL, TII.get(I.getOpcode()), TmpReg)
1694 .addReg(SrcReg)
1695 .addImm(I.getImm());
1696 break;
1697 }
1698
1700
1701 SrcReg = TmpReg;
1702 }
1703
1704 return true;
1705}
1706
/// Select the address-forming pseudos for globals, jump tables and constant
/// pools, according to the relocation model and code model.
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // When HWASAN is used and tagging of global variables is enabled
  // they should be accessed via the GOT, since the tagged address of a global
  // is incompatible with existing code models. This also applies to non-pic
  // mode.
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      // NOTE(review): a line appears to be missing here in this copy of the
      // file (likely a constrainSelectedInstRegOperands call) — verify
      // against upstream LLVM.
      return true;
    }

    // Use PC-relative addressing to access the GOT for this symbol, then
    // load the address from the GOT. This generates the pattern (PseudoLGA
    // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
    // %pcrel_lo(auipc))).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        // NOTE(review): the pointer-info and memory-flag arguments of this
        // call are missing from this copy of the file (extraction artifact).
        DefTy, Align(DefTy.getSizeInBits() / 8));

    MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::PseudoLGA), DefReg)
                               .addDisp(DispMO, 0)
                               .addMemOperand(MemOp);

    // NOTE(review): missing line here (likely constrainSelectedInstRegOperands).

    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  default: {
    // NOTE(review): the head of this statement is missing from this copy of
    // the file (likely a reportGISelFailure call) — verify against upstream.
        "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // Must lie within a single 2 GiB address range and must lie between
    // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
    // (lui %hi(sym)) %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::LUI), AddrHiDest)
                               .addDisp(DispMO, 0, RISCVII::MO_HI);

    // NOTE(review): missing line here (likely constrainSelectedInstRegOperands).

    MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                   TII.get(RISCV::ADDI), DefReg)
                               .addReg(AddrHiDest)
                               .addDisp(DispMO, 0, RISCVII::MO_LO);

    // NOTE(review): missing line here (likely constrainSelectedInstRegOperands).

    MI.eraseFromParent();
    return true;
  }
  case CodeModel::Medium:
    // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
    // relocation needs to reference a label that points to the auipc
    // instruction itself, not the global. This cannot be done inside the
    // instruction selector.
    if (IsExternWeak) {
      // An extern weak symbol may be undefined, i.e. have value 0, which may
      // not be within 2GiB of PC, so use GOT-indirect addressing to access the
      // symbol. This generates the pattern (PseudoLGA sym), which expands to
      // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineMemOperand *MemOp = MF.getMachineMemOperand(
          // NOTE(review): the pointer-info and memory-flag arguments of this
          // call are missing from this copy of the file (extraction artifact).
          DefTy, Align(DefTy.getSizeInBits() / 8));

      MachineInstr *Result = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                     TII.get(RISCV::PseudoLGA), DefReg)
                                 .addDisp(DispMO, 0)
                                 .addMemOperand(MemOp);

      // NOTE(review): missing line here (likely constrainSelectedInstRegOperands).

      MI.eraseFromParent();
      return true;
    }

    // Generate a sequence for accessing addresses within any 2GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    // NOTE(review): missing line here (likely constrainSelectedInstRegOperands).
    return true;
  }

  return false;
}
1820
1821bool RISCVInstructionSelector::selectSelect(MachineInstr &MI) const {
1822 auto &SelectMI = cast<GSelect>(MI);
1823
1824 Register LHS, RHS;
1826 getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);
1827
1828 Register DstReg = SelectMI.getReg(0);
1829
1830 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1831 if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
1832 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1833 Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1834 : RISCV::Select_FPR64_Using_CC_GPR;
1835 }
1836
1837 MachineInstr *Result =
1838 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII.get(Opc))
1839 .addDef(DstReg)
1840 .addReg(LHS)
1841 .addReg(RHS)
1842 .addImm(CC)
1843 .addReg(SelectMI.getTrueReg())
1844 .addReg(SelectMI.getFalseReg());
1845 MI.eraseFromParent();
1847 return true;
1848}
1849
1850// Convert an FCMP predicate to one of the supported F or D instructions.
1851static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1852 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1853 switch (Pred) {
1854 default:
1855 llvm_unreachable("Unsupported predicate");
1856 case CmpInst::FCMP_OLT:
1857 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1858 case CmpInst::FCMP_OLE:
1859 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1860 case CmpInst::FCMP_OEQ:
1861 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1862 }
1863}
1864
1865// Try legalizing an FCMP by swapping or inverting the predicate to one that
1866// is supported.
1868 CmpInst::Predicate &Pred, bool &NeedInvert) {
1869 auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
1870 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
1871 Pred == CmpInst::FCMP_OEQ;
1872 };
1873
1874 assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
1875
1877 if (isLegalFCmpPredicate(InvPred)) {
1878 Pred = InvPred;
1879 std::swap(LHS, RHS);
1880 return true;
1881 }
1882
1883 InvPred = CmpInst::getInversePredicate(Pred);
1884 NeedInvert = true;
1885 if (isLegalFCmpPredicate(InvPred)) {
1886 Pred = InvPred;
1887 return true;
1888 }
1889 InvPred = CmpInst::getSwappedPredicate(InvPred);
1890 if (isLegalFCmpPredicate(InvPred)) {
1891 Pred = InvPred;
1892 std::swap(LHS, RHS);
1893 return true;
1894 }
1895
1896 return false;
1897}
1898
1899// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1900// the result in DstReg.
1901// FIXME: Maybe we should expand this earlier.
1902bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI) const {
1903 auto &CmpMI = cast<GFCmp>(MI);
1904 CmpInst::Predicate Pred = CmpMI.getCond();
1905
1906 Register DstReg = CmpMI.getReg(0);
1907 Register LHS = CmpMI.getLHSReg();
1908 Register RHS = CmpMI.getRHSReg();
1909
1910 unsigned Size = MRI->getType(LHS).getSizeInBits();
1911 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1912
1913 Register TmpReg = DstReg;
1914
1915 bool NeedInvert = false;
1916 // First try swapping operands or inverting.
1917 if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1918 if (NeedInvert)
1919 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1920 MachineInstr *Cmp = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1921 TII.get(getFCmpOpcode(Pred, Size)), TmpReg)
1922 .addReg(LHS)
1923 .addReg(RHS);
1925 } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1926 // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
1927 NeedInvert = Pred == CmpInst::FCMP_UEQ;
1928 Register Cmp1Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1929 MachineInstr *Cmp1 =
1930 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1931 TII.get(getFCmpOpcode(CmpInst::FCMP_OLT, Size)), Cmp1Reg)
1932 .addReg(LHS)
1933 .addReg(RHS);
1935 Register Cmp2Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1936 MachineInstr *Cmp2 =
1937 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1938 TII.get(getFCmpOpcode(CmpInst::FCMP_OLT, Size)), Cmp2Reg)
1939 .addReg(RHS)
1940 .addReg(LHS);
1942 if (NeedInvert)
1943 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1944 MachineInstr *Or = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1945 TII.get(RISCV::OR), TmpReg)
1946 .addReg(Cmp1Reg)
1947 .addReg(Cmp2Reg);
1949 } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1950 // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
1951 // If LHS and RHS are the same, a single FEQ suffices.
1952 NeedInvert = Pred == CmpInst::FCMP_UNO;
1953 if (NeedInvert)
1954 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1955 if (LHS == RHS) {
1956 MachineInstr *Cmp =
1957 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1958 TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), TmpReg)
1959 .addReg(LHS)
1960 .addReg(LHS);
1962 } else {
1963 Register Cmp1Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1964 MachineInstr *Cmp1 =
1965 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1966 TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), Cmp1Reg)
1967 .addReg(LHS)
1968 .addReg(LHS);
1970 Register Cmp2Reg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1971 MachineInstr *Cmp2 =
1972 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1973 TII.get(getFCmpOpcode(CmpInst::FCMP_OEQ, Size)), Cmp2Reg)
1974 .addReg(RHS)
1975 .addReg(RHS);
1977 MachineInstr *And = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1978 TII.get(RISCV::AND), TmpReg)
1979 .addReg(Cmp1Reg)
1980 .addReg(Cmp2Reg);
1982 }
1983 } else
1984 llvm_unreachable("Unhandled predicate");
1985
1986 // Emit an XORI to invert the result if needed.
1987 if (NeedInvert) {
1988 MachineInstr *Xor = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
1989 TII.get(RISCV::XORI), DstReg)
1990 .addReg(TmpReg)
1991 .addImm(1);
1993 }
1994
1995 MI.eraseFromParent();
1996 return true;
1997}
1998
1999void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
2000 SyncScope::ID FenceSSID,
2001 MachineInstr &MI) const {
2002 MachineBasicBlock &MBB = *MI.getParent();
2003 DebugLoc DL = MI.getDebugLoc();
2004
2005 if (STI.hasStdExtZtso()) {
2006 // The only fence that needs an instruction is a sequentially-consistent
2007 // cross-thread fence.
2008 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
2009 FenceSSID == SyncScope::System) {
2010 // fence rw, rw
2011 BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE))
2014 return;
2015 }
2016
2017 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
2018 BuildMI(MBB, MI, DL, TII.get(TargetOpcode::MEMBARRIER));
2019 return;
2020 }
2021
2022 // singlethread fences only synchronize with signal handlers on the same
2023 // thread and thus only need to preserve instruction order, not actually
2024 // enforce memory ordering.
2025 if (FenceSSID == SyncScope::SingleThread) {
2026 BuildMI(MBB, MI, DL, TII.get(TargetOpcode::MEMBARRIER));
2027 return;
2028 }
2029
2030 // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
2031 // Manual: Volume I.
2032 unsigned Pred, Succ;
2033 switch (FenceOrdering) {
2034 default:
2035 llvm_unreachable("Unexpected ordering");
2036 case AtomicOrdering::AcquireRelease:
2037 // fence acq_rel -> fence.tso
2038 BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE_TSO));
2039 return;
2040 case AtomicOrdering::Acquire:
2041 // fence acquire -> fence r, rw
2042 Pred = RISCVFenceField::R;
2044 break;
2045 case AtomicOrdering::Release:
2046 // fence release -> fence rw, w
2048 Succ = RISCVFenceField::W;
2049 break;
2050 case AtomicOrdering::SequentiallyConsistent:
2051 // fence seq_cst -> fence rw, rw
2054 break;
2055 }
2056 BuildMI(MBB, MI, DL, TII.get(RISCV::FENCE)).addImm(Pred).addImm(Succ);
2057}
2058
2059namespace llvm {
2060InstructionSelector *
2062 const RISCVSubtarget &Subtarget,
2063 const RISCVRegisterBankInfo &RBI) {
2064 return new RISCVInstructionSelector(TM, Subtarget, RBI);
2065}
2066} // end namespace llvm
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Provides analysis for querying information about KnownBits during GISel passes.
#define DEBUG_TYPE
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
IRTranslator LLVM IR MI
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
#define I(x, y, z)
Definition MD5.cpp:57
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register Reg
Register const TargetRegisterInfo * TRI
Promote Memory to Register
Definition Mem2Reg.cpp:110
MachineInstr unsigned OpIdx
#define P(N)
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
#define LLVM_DEBUG(...)
Definition Debug.h:114
Value * RHS
Value * LHS
APInt bitcastToAPInt() const
Definition APFloat.h:1426
bool isPosZero() const
Definition APFloat.h:1545
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1511
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1118
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:476
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
Definition APInt.h:287
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
Definition Constants.h:174
This is an important base class in LLVM.
Definition Constant.h:43
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Analysis providing profile information.
This class provides the information for the target register banks.
unsigned getXLen() const
std::optional< unsigned > getRealVLen() const
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
Definition Register.h:20
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Definition Register.h:83
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
operand_type_match m_Reg()
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
Definition RISCVMatInt.h:43
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
Definition LLVMContext.h:55
@ System
Synchronized with respect to all concurrently executing threads.
Definition LLVMContext.h:58
This is an optimization pass for GlobalISel generic memory operations.
@ Offset
Definition DWP.cpp:557
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
Definition MathExtras.h:165
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
Definition bit.h:315
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
Definition Utils.cpp:155
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
Definition bit.h:325
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
Definition Utils.cpp:313
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Definition bit.h:204
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
Definition MathExtras.h:331
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
Definition Utils.cpp:257
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
Definition MathExtras.h:189
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
Definition MathExtras.h:94
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its...
Definition Utils.cpp:432
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
Definition MathExtras.h:77
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
Definition Error.cpp:177
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:872
#define MORE()
Definition regcomp.c:246
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.