//===-- RISCVInstructionSelector.cpp -----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVRegisterBankInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/GISelValueTracking.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "riscv-isel"

using namespace llvm;
using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

namespace {

class RISCVInstructionSelector : public InstructionSelector {
public:
  RISCVInstructionSelector(const RISCVTargetMachine &TM,
                           const RISCVSubtarget &STI,
                           const RISCVRegisterBankInfo &RBI);

  bool select(MachineInstr &MI) override;

  void setupMF(MachineFunction &MF, GISelValueTracking *VT,
               CodeGenCoverage *CoverageInfo, ProfileSummaryInfo *PSI,
               BlockFrequencyInfo *BFI) override {
    InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
    MRI = &MF.getRegInfo();
  }

  static const char *getName() { return DEBUG_TYPE; }

private:
  const TargetRegisterClass *
  getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB) const;

  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;
  bool hasAllHUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 16);
  }
  bool hasAllWUsers(const MachineInstr &MI) const {
    return hasAllNBitUsers(MI, 32);
  }

  bool isRegInGprb(Register Reg) const;
  bool isRegInFprb(Register Reg) const;

  // tblgen-erated 'select' implementation, used as the initial selector for
  // the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // A lowering phase that runs before any selection attempts.
  void preISelLower(MachineInstr &MI, MachineIRBuilder &MIB);

  bool replacePtrWithInt(MachineOperand &Op, MachineIRBuilder &MIB);

  // Custom selection methods
  bool selectCopy(MachineInstr &MI) const;
  bool selectImplicitDef(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool materializeImm(Register Reg, int64_t Imm, MachineIRBuilder &MIB) const;
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  bool selectSelect(MachineInstr &MI, MachineIRBuilder &MIB) const;
  bool selectFPCompare(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void emitFence(AtomicOrdering FenceOrdering, SyncScope::ID FenceSSID,
                 MachineIRBuilder &MIB) const;
  bool selectUnmergeValues(MachineInstr &MI, MachineIRBuilder &MIB) const;
  void addVectorLoadStoreOperands(MachineInstr &I,
                                  SmallVectorImpl<SrcOp> &SrcOps,
                                  unsigned &CurOp, bool IsMasked,
                                  bool IsStridedOrIndexed,
                                  LLT *IndexVT = nullptr) const;
  bool selectIntrinsicWithSideEffects(MachineInstr &I,
                                      MachineIRBuilder &MIB) const;
  bool selectIntrinsic(MachineInstr &I, MachineIRBuilder &MIB) const;
  bool selectExtractSubvector(MachineInstr &MI, MachineIRBuilder &MIB) const;

  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }
  ComplexRendererFns selectAddrRegImm(MachineOperand &Root) const;

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }

  ComplexRendererFns renderVLOp(MachineOperand &Root) const;

  // Custom renderers for tablegen
  void renderNegImm(MachineInstrBuilder &MIB, const MachineInstr &MI,
                    int OpIdx) const;
  void renderImmSubFromXLen(MachineInstrBuilder &MIB, const MachineInstr &MI,
                            int OpIdx) const;
  void renderImmSubFrom32(MachineInstrBuilder &MIB, const MachineInstr &MI,
                          int OpIdx) const;
  void renderImmPlus1(MachineInstrBuilder &MIB, const MachineInstr &MI,
                      int OpIdx) const;
  void renderFrameIndex(MachineInstrBuilder &MIB, const MachineInstr &MI,
                        int OpIdx) const;

  void renderTrailingZeros(MachineInstrBuilder &MIB, const MachineInstr &MI,
                           int OpIdx) const;
  void renderXLenSubTrailingOnes(MachineInstrBuilder &MIB,
                                 const MachineInstr &MI, int OpIdx) const;

  void renderAddiPairImmLarge(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;
  void renderAddiPairImmSmall(MachineInstrBuilder &MIB, const MachineInstr &MI,
                              int OpIdx) const;

  const RISCVSubtarget &STI;
  const RISCVInstrInfo &TII;
  const RISCVRegisterInfo &TRI;
  const RISCVRegisterBankInfo &RBI;
  const RISCVTargetMachine &TM;

  MachineRegisterInfo *MRI = nullptr;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. We need to unify the name
  // of the Subtarget variable.
  const RISCVSubtarget *Subtarget = &STI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// Mimics optimizations in ISel and the RISCVOptWInstrs pass.
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {

  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();
    unsigned OpIdx = UserOp.getOperandNo();

    switch (UserMI.getOpcode()) {
    default:
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      if (Bits >= 32)
        break;
      return false;
    case RISCV::SLL:
    case RISCV::SRA:
    case RISCV::SRL:
      // Shift amount operands only use log2(XLen) bits.
      if (OpIdx == 2 && Bits >= Log2_32(Subtarget->getXLen()))
        break;
      return false;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits >= Subtarget->getXLen() - UserMI.getOperand(2).getImm())
        break;
      return false;
    case RISCV::ANDI:
      if (Bits >= (unsigned)llvm::bit_width<uint64_t>(
                      (uint64_t)UserMI.getOperand(2).getImm()))
        break;
      goto RecCheck;
    case RISCV::AND:
    case RISCV::OR:
    case RISCV::XOR:
    RecCheck:
      if (hasAllNBitUsers(UserMI, Bits, Depth + 1))
        break;
      return false;
    case RISCV::SRLI: {
      unsigned ShAmt = UserMI.getOperand(2).getImm();
      // If we are shifting right by less than Bits, and users don't demand
      // any bits that were shifted into [Bits-1:0], then we can consider this
      // as an N-bit user.
      if (Bits > ShAmt && hasAllNBitUsers(UserMI, Bits - ShAmt, Depth + 1))
        break;
      return false;
    }
    }
  }

  return true;
}

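/// ShiftMask complex pattern: select the shift-amount operand, looking
/// through a zero-extend and stripping an AND mask or an add/sub of a
/// multiple of the shift width when they cannot change the effective amount;
/// a subtraction from a suitable constant is folded to a NEG or NOT instead.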
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register ShAmtReg = Root.getReg();
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    ShAmtReg = ZExtSrcReg;

  APInt AndMask;
  Register AndSrcReg;
  // Try to combine the following pattern (applicable to other shift
  // instructions as well as 32-bit ones):
  //
  //   %4:gprb(s64) = G_AND %3, %2
  //   %5:gprb(s64) = G_LSHR %1, %4(s64)
  //
  // According to RISC-V's ISA manual, SLL, SRL, and SRA ignore all but the
  // lowest log2(XLEN) bits of register rs2. For the above pattern, if the
  // lowest log2(XLEN) bits of register rd and rs2 of G_AND are the same,
  // the G_AND can be eliminated. Given register rs1 or rs2 holding a
  // constant (the AND mask), there are two cases where G_AND can be erased:
  //
  // 1. the lowest log2(XLEN) bits of the AND mask are all set
  // 2. the bits of the register being masked are already unset (zero set)
  if (mi_match(ShAmtReg, *MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask so try restoring any
      // bits that are known zero.
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  }

  APInt Imm;
  Register Reg;
  if (mi_match(ShAmtReg, *MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // If we are shifting by X+N where N == 0 mod Size, then just shift by X
      // to avoid the ADD.
      ShAmtReg = Reg;
  } else if (mi_match(ShAmtReg, *MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
      // to generate a NEG instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // If we are shifting by N-X where N == -1 mod Size, then just shift by
      // ~X to generate a NOT instead of a SUB of a constant.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}

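/// SExtBits complex pattern: match a value that is already sign-extended from
/// \p Bits, either via an explicit G_SEXT_INREG or via a known sign-bit count.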
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits) {
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};
  }

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

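/// ZExtBits complex pattern: match a value already zero-extended from
/// \p Bits, via an AND with the all-ones mask, a G_ZEXT from a \p Bits-wide
/// type, or upper bits that are known to be zero.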
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  uint64_t Mask = maskTrailingOnes<uint64_t>(Bits);
  if (mi_match(RootReg, *MRI, m_GAnd(m_Reg(RegX), m_SpecificICst(Mask)))) {
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  }

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}

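/// SHXADD complex pattern: match shift-and-mask combinations that can feed
/// the shifted operand of a Zba SHxADD instruction after a single
/// SRLI/SRLIW.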
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // (and (shl y, c2), mask)
  if (mi_match(RootReg, *MRI,
               m_GAnd(m_GShl(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = true;
  // (and (lshr y, c2), mask)
  else if (mi_match(RootReg, *MRI,
                    m_GAnd(m_GLShr(m_Reg(RegY), m_ICst(C2)), m_ICst(Mask))))
    LeftShift = false;

  if (LeftShift.has_value()) {
    if (*LeftShift)
      Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());
    else
      Mask &= maskTrailingOnes<uint64_t>(XLen - C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) in which mask has no leading zeros and
      // c3 trailing zeros. We can use an SRLI by c3 - c2 followed by a SHXADD.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) in which mask has c2 leading zeros and
      // c3 trailing zeros. We can use an SRLI by c2 + c3 followed by a SHXADD.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  LeftShift.reset();

  // (shl (and y, mask), c2)
  if (mi_match(RootReg, *MRI,
               m_GShl(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                      m_ICst(C2))))
    LeftShift = true;
  // (lshr (and y, mask), c2)
  else if (mi_match(RootReg, *MRI,
                    m_GLShr(m_OneNonDBGUse(m_GAnd(m_Reg(RegY), m_ICst(Mask))),
                            m_ICst(C2))))
    LeftShift = false;

  if (LeftShift.has_value() && Mask.isShiftedMask()) {
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();

    // Given (shl (and y, mask), c2) in which mask has 32 leading zeros and
    // c3 trailing zeros. If c2 + c3 == ShAmt, we can emit SRLIW + SHXADD.
    bool Cond = *LeftShift && Leading == 32 && Trailing > 0 &&
                (Trailing + C2.getLimitedValue()) == ShAmt;
    if (!Cond)
      // Given (lshr (and y, mask), c2) in which mask has 32 leading zeros and
      // c3 trailing zeros. If c3 - c2 == ShAmt, we can emit SRLIW + SHXADD.
      Cond = !*LeftShift && Leading == 32 && C2.ult(Trailing) &&
             (Trailing - C2.getLimitedValue()) == ShAmt;

    if (Cond) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            .addImm(Trailing);
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}

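/// SHXADD_UW complex pattern: match (and (shl x, c2), mask) forms that can
/// feed a Zba SHxADD.UW instruction after a single SLLI.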
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;

  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  // Given (and (shl x, c2), mask) in which mask is a shifted mask with
  // 32 - ShAmt leading zeros and c2 trailing zeros. We can use SLLI by
  // c2 - ShAmt followed by SHXADD_UW with ShAmt for the x amount.
  APInt Mask, C2;
  Register RegX;
  if (mi_match(
          RootReg, *MRI,
          m_OneNonDBGUse(m_GAnd(m_OneNonDBGUse(m_GShl(m_Reg(RegX), m_ICst(C2))),
                                m_ICst(Mask))))) {
    Mask &= maskTrailingZeros<uint64_t>(C2.getLimitedValue());

    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
              .addImm(C2.getLimitedValue() - ShAmt);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  return std::nullopt;
}

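/// Render a VL operand: all-ones constants become the VLMaxSentinel
/// immediate, other uimm5 constants become immediates, and anything else is
/// passed through as a register.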
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // If the operand is a G_CONSTANT with value of all ones it is larger
      // than VLMAX. We convert it to an immediate with value VLMaxSentinel.
      // This is recognized specially by the vsetvli insertion pass.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    if (isUInt<5>(C->getZExtValue())) {
      uint64_t ZExtC = C->getZExtValue();
      return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
    }
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}

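/// AddrRegImm complex pattern: fold a frame index or a base plus simm12
/// constant offset into a reg+imm addressing mode.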
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // TODO: Need to get the immediate from a G_PTR_ADD. Should this be done in
  // the combiner?
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}

/// Returns the RISCVCC::CondCode that corresponds to the CmpInst::Predicate
/// \p CC. \p CC must be an ICMP predicate.
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate CC) {
  switch (CC) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

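/// Compute the condition code and LHS/RHS operands for a branch on
/// \p CondReg, folding and canonicalizing a defining G_ICMP when possible.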
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC,
                                 Register &LHS, Register &RHS,
                                 MachineRegisterInfo &MRI) {
  // Try to fold an ICmp. If that fails, use a NE compare with X0.
  CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {
    LHS = CondReg;
    RHS = RISCV::X0;
    CC = RISCVCC::COND_NE;
    return;
  }

  // We found an ICmp, do some canonicalization.

  // Adjust comparisons to use comparison with 0 if possible.
  if (auto Constant = getIConstantVRegSExtVal(RHS, MRI)) {
    switch (Pred) {
    case CmpInst::Predicate::ICMP_SGT:
      // Convert X > -1 to X >= 0
      if (*Constant == -1) {
        CC = RISCVCC::COND_GE;
        RHS = RISCV::X0;
        return;
      }
      break;
    case CmpInst::Predicate::ICMP_SLT:
      // Convert X < 1 to 0 >= X
      if (*Constant == 1) {
        CC = RISCVCC::COND_GE;
        RHS = LHS;
        LHS = RISCV::X0;
        return;
      }
      break;
    default:
      break;
    }
  }

  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These CCs are supported directly by RISC-V branches.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // These CCs are not supported directly by RISC-V branches, but changing
    // the direction of the CC and swapping LHS and RHS are.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  }

  CC = getRISCVCCFromICmp(Pred);
}

/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
/// \p GenericOpc, appropriate for the GPR register bank and of memory access
/// size \p OpSize.
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default:
    llvm_unreachable("Unexpected memory size");
  case 8:
    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16:
    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32:
    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64:
    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  }
}

/// Select the RISC-V reg+imm opcode for the G_LOAD or G_STORE operation
/// \p GenericOpc, appropriate for the GPR register bank and of memory access
/// size \p OpSize. \returns \p GenericOpc if the combination is unsupported.
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  case 8:
    // Prefer unsigned due to no c.lb in Zcb.
    return IsStore ? RISCV::SB : RISCV::LBU;
  case 16:
    return IsStore ? RISCV::SH : RISCV::LH;
  case 32:
    return IsStore ? RISCV::SW : RISCV::LW;
  case 64:
    return IsStore ? RISCV::SD : RISCV::LD;
  }

  return GenericOpc;
}

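/// Append the base pointer, the optional stride/index operand, and the
/// optional mask operand of a vector load/store intrinsic to \p SrcOps,
/// advancing \p CurOp past the consumed operands.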
void RISCVInstructionSelector::addVectorLoadStoreOperands(
    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
  // Base Pointer
  auto PtrReg = I.getOperand(CurOp++).getReg();
  SrcOps.push_back(PtrReg);

  // Stride or Index
  if (IsStridedOrIndexed) {
    auto StrideReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(StrideReg);
    if (IndexVT)
      *IndexVT = MRI->getType(StrideReg);
  }

  // Mask
  if (IsMasked) {
    auto MaskReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(MaskReg);
  }
}

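/// Select G_INTRINSIC_W_SIDE_EFFECTS instructions: currently the RVV
/// unit-stride, strided, and indexed load/store intrinsics, lowered to the
/// corresponding RVV pseudo instructions.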
bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
                    IntrinID == Intrinsic::riscv_vlse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
                     IntrinID == Intrinsic::riscv_vlse_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector
    const Register DstReg = I.getOperand(0).getReg();

    // Sources
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Passthru
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    } else {
      SrcOps.push_back(Register(RISCV::NoRegister));
    }

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VLEPseudo *P =
        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Policy
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
                    IntrinID == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
                     IntrinID == Intrinsic::riscv_vloxei_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector
    const Register DstReg = I.getOperand(0).getReg();

    // Sources
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Passthru
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    } else {
      // Use NoRegister if there is no specified passthru.
      SrcOps.push_back(Register());
    }
    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Policy
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
                    IntrinID == Intrinsic::riscv_vsse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
                     IntrinID == Intrinsic::riscv_vsse_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Store value
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
                    IntrinID == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
                     IntrinID == Intrinsic::riscv_vsoxei_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Store value
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW
    PseudoMI.addImm(Log2SEW);

    // Memref
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}

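/// Select side-effect-free G_INTRINSIC instructions: currently vsetvli and
/// vsetvlimax, which become PseudoVSETVLI, PseudoVSETIVLI, or
/// PseudoVSETVLIX0 depending on the AVL operand.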
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
                                               MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {
    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;

    unsigned Offset = VLMax ? 2 : 3;
    unsigned SEW = RISCVVType::decodeVSEW(I.getOperand(Offset).getImm() & 0x7);
    RISCVVType::VLMUL VLMul =
        static_cast<RISCVVType::VLMUL>(I.getOperand(Offset + 1).getImm() & 0x7);

    unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                              /*MaskAgnostic*/ true);

    Register DstReg = I.getOperand(0).getReg();

    Register VLOperand;
    unsigned Opcode = RISCV::PseudoVSETVLI;

    // Check if AVL is a constant that equals VLMAX.
    if (!VLMax) {
      Register AVLReg = I.getOperand(2).getReg();
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        if (auto VLEN = Subtarget->getRealVLen()) {
          if (*VLEN / RISCVVType::getSEWLMULRatio(SEW, VLMul) == AVL)
            VLMax = true;
        }
      }

      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        const auto *C = AVLDef->getOperand(1).getCImm();
        if (C->getValue().isAllOnes())
          VLMax = true;
      }
    }

    if (VLMax) {
      VLOperand = Register(RISCV::X0);
      Opcode = RISCV::PseudoVSETVLIX0;
    } else {
      Register AVLReg = I.getOperand(2).getReg();
      VLOperand = AVLReg;

      // Check if AVL is a small constant that can use PseudoVSETIVLI.
      if (auto AVLConst = getIConstantVRegValWithLookThrough(AVLReg, *MRI)) {
        uint64_t AVL = AVLConst->Value.getZExtValue();
        if (isUInt<5>(AVL)) {
          auto PseudoMI = MIB.buildInstr(RISCV::PseudoVSETIVLI, {DstReg}, {})
                              .addImm(AVL)
                              .addImm(VTypeI);
          I.eraseFromParent();
          return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
        }
      }
    }

    auto PseudoMI =
        MIB.buildInstr(Opcode, {DstReg}, {VLOperand}).addImm(VTypeI);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}

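/// Select G_EXTRACT_SUBVECTOR as a subregister copy when the extraction
/// decomposes to a subregister index with a zero element offset.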
bool RISCVInstructionSelector::selectExtractSubvector(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);

  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());

  MVT DstMVT = getMVTForLLT(DstTy);
  MVT SrcMVT = getMVTForLLT(SrcTy);

  unsigned SubRegIdx;
  std::tie(SubRegIdx, Idx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          SrcMVT, DstMVT, Idx, &TRI);

  if (Idx != 0)
    return false;

  unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {}).addReg(SrcReg, 0, SubRegIdx);

  MI.eraseFromParent();
  return true;
}

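/// Main entry point for selecting a single instruction: lowers pointer
/// operations first, handles PHIs and copies directly, then tries the
/// TableGen-erated selector before falling back to the custom cases below.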
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast_if_present<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid()) {
          LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
          return false;
        }

        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC) {
          LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
          return false;
        }
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    if (MI.isCopy())
      return selectCopy(MI);

    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();

    if (SrcTy.isVector())
      return false; // Should be handled by imported patterns.

    assert((*RBI.getRegBank(DstReg, *MRI, TRI)).getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // Use addiw SrcReg, 0 (sext.w) for i32.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use add.uw SrcReg, X0 (zext.w) for i32 with Zba.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use sext.h/zext.h for i16 with Zbb.
    if (SrcSize == 16 && STI.hasStdExtZbb()) {
      MI.setDesc(TII.get(IsSigned ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Use pack(w) SrcReg, X0 for i16 zext with Zbkb.
    if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
      MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Fall back to shift pair.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI);
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg;
      if (FPimm.isPosZero()) {
        GPRReg = RISCV::X0;
      } else {
        GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        APInt Imm = FPimm.bitcastToAPInt();
        if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
          return false;
      }

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      // s64 on rv32
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");

      if (FPimm.isPosZero()) {
        // Optimize +0.0 to use fcvt.d.w
        MachineInstrBuilder FCVT =
            MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)})
                .addImm(RISCVFPRndMode::RNE);
        if (!FCVT.constrainAllUses(TII, TRI, RBI))
          return false;

        MI.eraseFromParent();
        return true;
      }

      // Split into two pieces and build through the stack.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, *MRI, TRI);
    // Check that the pointer register is valid.
    assert(PtrRB.getID() == RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
#endif

    // Can only handle AddressSpace 0.
    if (PtrTy.getAddressSpace() != 0)
      return false;

    unsigned MemSize = LdSt.getMemSizeInBits().getValue();
    AtomicOrdering Order = LdSt.getMMO().getSuccessOrdering();

    if (isStrongerThanMonotonic(Order)) {
      MI.setDesc(TII.get(selectZalasrLoadStoreOp(Opc, MemSize)));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    const unsigned NewOpc = selectRegImmLoadStoreOp(MI.getOpcode(), MemSize);
    if (NewOpc == MI.getOpcode())
      return false;

    // Check if we can fold anything into the addressing mode.
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Folded something. Create a new instruction and return it.
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    if (isa<GStore>(MI))
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI, MIB);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI, MIB);
  default:
    return false;
  }
}

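/// Select a G_UNMERGE_VALUES that splits an FPR64 value into two GPR halves
/// using the Zfa FMV.X.W/FMVH.X.D instructions; bails out otherwise.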
bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split F64 Src into two s32 parts
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}

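/// Rewrite a pointer operand as an sXLen integer by inserting a G_PTRTOINT,
/// then select the new instruction immediately.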
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

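/// Lower generic pointer arithmetic to integer forms (G_PTR_ADD to G_ADD,
/// G_PTRMASK to G_AND) before any selection attempt.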
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}

void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(Subtarget->getXLen() - llvm::countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}

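/// Map an LLT/register-bank pair to the narrowest target register class that
/// can hold it, or null if the combination is unsupported.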
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    if (Ty.getSizeInBits() <= 32 || (STI.is64Bit() && Ty.getSizeInBits() == 64))
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;

    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
    return false;
  }

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(MI.getOpcode())
                      << " operand\n");
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

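/// Materialize the constant \p Imm into \p DstReg using the RISCVMatInt
/// instruction sequence, chaining through fresh virtual registers.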
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    case RISCVMatInt::Imm:
      // clang-format off
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {})
                   .addImm(I.getImm());
      // clang-format on
      break;
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    case RISCVMatInt::RegReg:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg, SrcReg});
      break;
    case RISCVMatInt::RegImm:
      Result =
          MIB.buildInstr(I.getOpcode(), {TmpReg}, {SrcReg}).addImm(I.getImm());
      break;
    }

    if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
      return false;

    SrcReg = TmpReg;
  }

  return true;
}

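/// Materialize the address of a global, jump table, or constant pool symbol,
/// choosing a PC-relative, GOT-indirect, or absolute sequence based on code
/// model, PIC mode, and linkage.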
1640bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
1641 MachineIRBuilder &MIB, bool IsLocal,
1642 bool IsExternWeak) const {
1643 assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1644 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1645 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1646 "Unexpected opcode");
1647
1648 const MachineOperand &DispMO = MI.getOperand(1);
1649
1650 Register DefReg = MI.getOperand(0).getReg();
1651 const LLT DefTy = MRI->getType(DefReg);
1652
1653 // When HWASAN is used and tagging of global variables is enabled
1654 // they should be accessed via the GOT, since the tagged address of a global
1655 // is incompatible with existing code models. This also applies to non-pic
1656 // mode.
1657 if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
1658 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1659 // Use PC-relative addressing to access the symbol. This generates the
1660 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
1661 // %pcrel_lo(auipc)).
1662 MI.setDesc(TII.get(RISCV::PseudoLLA));
1664 }
1665
1666 // Use PC-relative addressing to access the GOT for this symbol, then
1667 // load the address from the GOT. This generates the pattern (PseudoLGA
1668 // sym), which expands to (ld (addi (auipc %got_pcrel_hi(sym))
1669 // %pcrel_lo(auipc))).
1670 MachineFunction &MF = *MI.getParent()->getParent();
1671 MachineMemOperand *MemOp = MF.getMachineMemOperand(
1675 DefTy, Align(DefTy.getSizeInBits() / 8));
1676
1677 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1678 .addDisp(DispMO, 0)
1679 .addMemOperand(MemOp);
1680
1681 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1682 return false;
1683
1684 MI.eraseFromParent();
1685 return true;
1686 }
1687
1688 switch (TM.getCodeModel()) {
1689 default: {
1691 "Unsupported code model for lowering", MI);
1692 return false;
1693 }
1694 case CodeModel::Small: {
1695 // Must lie within a single 2 GiB address range and must lie between
1696 // absolute addresses -2 GiB and +2 GiB. This generates the pattern (addi
1697 // (lui %hi(sym)) %lo(sym)).
1698 Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1699 MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
1700 .addDisp(DispMO, 0, RISCVII::MO_HI);
1701
1702 if (!constrainSelectedInstRegOperands(*AddrHi, TII, TRI, RBI))
1703 return false;
1704
1705 auto Result = MIB.buildInstr(RISCV::ADDI, {DefReg}, {AddrHiDest})
1706 .addDisp(DispMO, 0, RISCVII::MO_LO);
1707
1708 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1709 return false;
1710
1711 MI.eraseFromParent();
1712 return true;
1713 }
1714 case CodeModel::Medium:
1715 // Emit LGA/LLA instead of the sequence it expands to because the pcrel_lo
1716 // relocation needs to reference a label that points to the auipc
1717 // instruction itself, not the global. This cannot be done inside the
1718 // instruction selector.
1719 if (IsExternWeak) {
1720 // An extern weak symbol may be undefined, i.e. have value 0, which may
1721 // not be within 2GiB of PC, so use GOT-indirect addressing to access the
1722 // symbol. This generates the pattern (PseudoLGA sym), which expands to
1723 // (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
1724 MachineFunction &MF = *MI.getParent()->getParent();
1725 MachineMemOperand *MemOp = MF.getMachineMemOperand(
1726 MachinePointerInfo::getGOT(MF),
1727 MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
1728 MachineMemOperand::MOInvariant,
1729 DefTy, Align(DefTy.getSizeInBits() / 8));
1730
1731 auto Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
1732 .addDisp(DispMO, 0)
1733 .addMemOperand(MemOp);
1734
1735 if (!constrainSelectedInstRegOperands(*Result, TII, TRI, RBI))
1736 return false;
1737
1738 MI.eraseFromParent();
1739 return true;
1740 }
1741
1742 // Generate a sequence for accessing addresses within any 2GiB range
1743 // within the address space. This generates the pattern (PseudoLLA sym),
1744 // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
1745 MI.setDesc(TII.get(RISCV::PseudoLLA));
1746 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
1747 }
1748
1749 return false;
1750}
1751
1752bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
1753 MachineIRBuilder &MIB) const {
1754 auto &SelectMI = cast<GSelect>(MI);
1755
1756 Register LHS, RHS;
1757 RISCVCC::CondCode CC;
1758 getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);
1759
1760 Register DstReg = SelectMI.getReg(0);
1761
1762 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1763 if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
1764 unsigned Size = MRI->getType(DstReg).getSizeInBits();
1765 Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1766 : RISCV::Select_FPR64_Using_CC_GPR;
1767 }
1768
1769 MachineInstr *Result = MIB.buildInstr(Opc)
1770 .addDef(DstReg)
1771 .addReg(LHS)
1772 .addReg(RHS)
1773 .addImm(CC)
1774 .addReg(SelectMI.getTrueReg())
1775 .addReg(SelectMI.getFalseReg());
1776 MI.eraseFromParent();
1777 return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
1778}
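// Editor's note: the Select_*_Using_CC_GPR pseudos keep the operand order
// built above (dst, lhs, rhs, cc, trueval, falseval) and are lowered after
// selection, via their custom inserter, into a compare-and-branch sequence
// that produces trueval or falseval.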
1779
1780 // Convert an FCMP predicate to one of the supported H, F, or D instructions.
1781static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
1782 assert((Size == 16 || Size == 32 || Size == 64) && "Unsupported size");
1783 switch (Pred) {
1784 default:
1785 llvm_unreachable("Unsupported predicate");
1786 case CmpInst::FCMP_OLT:
1787 return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1788 case CmpInst::FCMP_OLE:
1789 return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1790 case CmpInst::FCMP_OEQ:
1791 return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1792 }
1793}
1794
1795// Try legalizing an FCMP by swapping or inverting the predicate to one that
1796// is supported.
1798 CmpInst::Predicate &Pred, bool &NeedInvert) {
1799 auto isLegalFCmpPredicate = [](CmpInst::Predicate Pred) {
1800 return Pred == CmpInst::FCMP_OLT || Pred == CmpInst::FCMP_OLE ||
1801 Pred == CmpInst::FCMP_OEQ;
1802 };
1803
1804 assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");
1805
1807 if (isLegalFCmpPredicate(InvPred)) {
1808 Pred = InvPred;
1809 std::swap(LHS, RHS);
1810 return true;
1811 }
1812
1813 InvPred = CmpInst::getInversePredicate(Pred);
1814 NeedInvert = true;
1815 if (isLegalFCmpPredicate(InvPred)) {
1816 Pred = InvPred;
1817 return true;
1818 }
1819 InvPred = CmpInst::getSwappedPredicate(InvPred);
1820 if (isLegalFCmpPredicate(InvPred)) {
1821 Pred = InvPred;
1822 std::swap(LHS, RHS);
1823 return true;
1824 }
1825
1826 return false;
1827}
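// Editor's note (worked example): FCMP_OGT is handled by the first branch,
// since its swapped form FCMP_OLT is legal, so (ogt a, b) becomes (olt b, a).
// FCMP_UGE has an illegal swap (FCMP_ULE) but a legal inverse (FCMP_OLT), so
// it selects as (olt a, b) with NeedInvert set, and the caller emits an XORI
// to flip the result bit.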
1828
1829// Emit a sequence of instructions to compare LHS and RHS using Pred. Return
1830// the result in DstReg.
1831// FIXME: Maybe we should expand this earlier.
1832bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
1833 MachineIRBuilder &MIB) const {
1834 auto &CmpMI = cast<GFCmp>(MI);
1835 CmpInst::Predicate Pred = CmpMI.getCond();
1836
1837 Register DstReg = CmpMI.getReg(0);
1838 Register LHS = CmpMI.getLHSReg();
1839 Register RHS = CmpMI.getRHSReg();
1840
1841 unsigned Size = MRI->getType(LHS).getSizeInBits();
1842 assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
1843
1844 Register TmpReg = DstReg;
1845
1846 bool NeedInvert = false;
1847 // First try swapping operands or inverting.
1848 if (legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
1849 if (NeedInvert)
1850 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1851 auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
1852 if (!Cmp.constrainAllUses(TII, TRI, RBI))
1853 return false;
1854 } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
1855 // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
1856 NeedInvert = Pred == CmpInst::FCMP_UEQ;
1857 auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
1858 {&RISCV::GPRRegClass}, {LHS, RHS});
1859 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1860 return false;
1861 auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
1862 {&RISCV::GPRRegClass}, {RHS, LHS});
1863 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1864 return false;
1865 if (NeedInvert)
1866 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1867 auto Or =
1868 MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1869 if (!Or.constrainAllUses(TII, TRI, RBI))
1870 return false;
1871 } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
1872 // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
1873 // FIXME: If LHS and RHS are the same we can use a single FEQ.
1874 NeedInvert = Pred == CmpInst::FCMP_UNO;
1875 auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
1876 {&RISCV::GPRRegClass}, {LHS, LHS});
1877 if (!Cmp1.constrainAllUses(TII, TRI, RBI))
1878 return false;
1879 auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
1880 {&RISCV::GPRRegClass}, {RHS, RHS});
1881 if (!Cmp2.constrainAllUses(TII, TRI, RBI))
1882 return false;
1883 if (NeedInvert)
1884 TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
1885 auto And =
1886 MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
1887 if (!And.constrainAllUses(TII, TRI, RBI))
1888 return false;
1889 } else
1890 llvm_unreachable("Unhandled predicate");
1891
1892 // Emit an XORI to invert the result if needed.
1893 if (NeedInvert) {
1894 auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
1895 if (!Xor.constrainAllUses(TII, TRI, RBI))
1896 return false;
1897 }
1898
1899 MI.eraseFromParent();
1900 return true;
1901}
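// Editor's note (illustrative): for `fcmp uno float %a, %b` the ORD/UNO
// branch above emits roughly:
//   feq.s t0, fa0, fa0
//   feq.s t1, fa1, fa1
//   and   t0, t0, t1
//   xori  a0, t0, 1
// i.e. the ordered test followed by the inverting XORI; the register names
// are placeholders chosen here for illustration.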
1902
1903void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
1904 SyncScope::ID FenceSSID,
1905 MachineIRBuilder &MIB) const {
1906 if (STI.hasStdExtZtso()) {
1907 // The only fence that needs an instruction is a sequentially-consistent
1908 // cross-thread fence.
1909 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
1910 FenceSSID == SyncScope::System) {
1911 // fence rw, rw
1912 MIB.buildInstr(RISCV::FENCE, {}, {})
1913 .addImm(RISCVFenceField::R | RISCVFenceField::W)
1914 .addImm(RISCVFenceField::R | RISCVFenceField::W);
1915 return;
1916 }
1917
1918 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
1919 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1920 return;
1921 }
1922
1923 // singlethread fences only synchronize with signal handlers on the same
1924 // thread and thus only need to preserve instruction order, not actually
1925 // enforce memory ordering.
1926 if (FenceSSID == SyncScope::SingleThread) {
1927 MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
1928 return;
1929 }
1930
1931 // Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
1932 // Manual: Volume I.
1933 unsigned Pred, Succ;
1934 switch (FenceOrdering) {
1935 default:
1936 llvm_unreachable("Unexpected ordering");
1937 case AtomicOrdering::AcquireRelease:
1938 // fence acq_rel -> fence.tso
1939 MIB.buildInstr(RISCV::FENCE_TSO, {}, {});
1940 return;
1941 case AtomicOrdering::Acquire:
1942 // fence acquire -> fence r, rw
1943 Pred = RISCVFenceField::R;
1944 Succ = RISCVFenceField::R | RISCVFenceField::W;
1945 break;
1946 case AtomicOrdering::Release:
1947 // fence release -> fence rw, w
1948 Pred = RISCVFenceField::R | RISCVFenceField::W;
1949 Succ = RISCVFenceField::W;
1950 break;
1951 case AtomicOrdering::SequentiallyConsistent:
1952 // fence seq_cst -> fence rw, rw
1953 Pred = RISCVFenceField::R | RISCVFenceField::W;
1954 Succ = RISCVFenceField::R | RISCVFenceField::W;
1955 break;
1956 }
1957 MIB.buildInstr(RISCV::FENCE, {}, {}).addImm(Pred).addImm(Succ);
1958}
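// Editor's note (example of the Table A.6 mapping): a `fence acquire` reaches
// the final buildInstr with Pred = R and Succ = R|W and is printed as
// `fence r, rw`, while a seq_cst fence becomes `fence rw, rw`.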
1959
1960namespace llvm {
1961InstructionSelector *
1962 createRISCVInstructionSelector(const RISCVTargetMachine &TM,
1963 const RISCVSubtarget &Subtarget,
1964 const RISCVRegisterBankInfo &RBI) {
1965 return new RISCVInstructionSelector(TM, Subtarget, RBI);
1966}
1967} // end namespace llvm