//===-- RISCVLegalizerInfo.cpp ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// RISC-V.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "RISCVLegalizerInfo.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVSubtarget.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalityPredicates;
using namespace LegalizeMutations;

// Is this type supported by scalar FP arithmetic operations given the current
// subtarget?
static LegalityPredicate typeIsScalarFPArith(unsigned TypeIdx,
                                             const RISCVSubtarget &ST) {
  return [=, &ST](const LegalityQuery &Query) {
    return Query.Types[TypeIdx].isScalar() &&
           ((ST.hasStdExtF() && Query.Types[TypeIdx].getSizeInBits() == 32) ||
            (ST.hasStdExtD() && Query.Types[TypeIdx].getSizeInBits() == 64));
  };
}
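// For example, on an RV32IF subtarget this predicate accepts s32 (F provides
// single-precision arithmetic) but rejects s64 unless D is also present.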

RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  const LLT sDoubleXLen = LLT::scalar(2 * XLen);
  const LLT p0 = LLT::pointer(0, XLen);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  const LLT nxv1s8 = LLT::scalable_vector(1, s8);
  const LLT nxv2s8 = LLT::scalable_vector(2, s8);
  const LLT nxv4s8 = LLT::scalable_vector(4, s8);
  const LLT nxv8s8 = LLT::scalable_vector(8, s8);
  const LLT nxv16s8 = LLT::scalable_vector(16, s8);
  const LLT nxv32s8 = LLT::scalable_vector(32, s8);
  const LLT nxv64s8 = LLT::scalable_vector(64, s8);

  const LLT nxv1s16 = LLT::scalable_vector(1, s16);
  const LLT nxv2s16 = LLT::scalable_vector(2, s16);
  const LLT nxv4s16 = LLT::scalable_vector(4, s16);
  const LLT nxv8s16 = LLT::scalable_vector(8, s16);
  const LLT nxv16s16 = LLT::scalable_vector(16, s16);
  const LLT nxv32s16 = LLT::scalable_vector(32, s16);

  const LLT nxv1s32 = LLT::scalable_vector(1, s32);
  const LLT nxv2s32 = LLT::scalable_vector(2, s32);
  const LLT nxv4s32 = LLT::scalable_vector(4, s32);
  const LLT nxv8s32 = LLT::scalable_vector(8, s32);
  const LLT nxv16s32 = LLT::scalable_vector(16, s32);

  const LLT nxv1s64 = LLT::scalable_vector(1, s64);
  const LLT nxv2s64 = LLT::scalable_vector(2, s64);
  const LLT nxv4s64 = LLT::scalable_vector(4, s64);
  const LLT nxv8s64 = LLT::scalable_vector(8, s64);

  using namespace TargetOpcode;

  auto AllVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                    nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                    nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                    nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
      .legalFor({s32, sXLen})
      .legalIf(all(
          typeInSet(0, AllVecTys),
          LegalityPredicate([=, &ST](const LegalityQuery &Query) {
            return ST.hasVInstructions() &&
                   (Query.Types[0].getScalarSizeInBits() != 64 ||
                    ST.hasVInstructionsI64()) &&
                   (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
                    ST.getELen() == 64);
          })))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();

  getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();

  auto &ShiftActions = getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL});
  if (ST.is64Bit())
    ShiftActions.customFor({{s32, s32}});
  ShiftActions.legalFor({{s32, s32}, {s32, sXLen}, {sXLen, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, sXLen)
      .clampScalar(0, s32, sXLen)
      .minScalarSameAs(1, 0);
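  // Note: on RV64 the customFor({{s32, s32}}) above routes s32 shifts to
  // legalizeShlAshrLshr (later in this file), which promotes in-range
  // constant shift amounts to s64 so the imported SelectionDAG patterns can
  // select them.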

  if (ST.is64Bit()) {
    getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
        .legalFor({{sXLen, s32}})
        .maxScalar(0, sXLen);

    getActionDefinitionsBuilder(G_SEXT_INREG)
        .customFor({sXLen})
        .maxScalar(0, sXLen)
        .lower();
  } else {
    getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}).maxScalar(0, sXLen);

    getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower();
  }

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }
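  // For instance, on RV32 with D an s64 G_MERGE_VALUES of two s32 halves
  // stays legal, presumably so s64 floating-point values can move between a
  // GPR pair and an FPR64 without being narrowed first.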

  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  auto &RotateActions = getActionDefinitionsBuilder({G_ROTL, G_ROTR});
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb()) {
    RotateActions.legalFor({{s32, sXLen}, {sXLen, sXLen}});
    // Widen s32 rotate amount to s64 so SDAG patterns will match.
    if (ST.is64Bit())
      RotateActions.widenScalarIf(all(typeIs(0, s32), typeIs(1, s32)),
                                  changeTo(1, sXLen));
  }
  RotateActions.lower();
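  // Without Zbb/Zbkb, lower() expands the rotate generically, roughly
  // rot(x, n) -> (x << n) | (x >> (width - n)) with the amount masked to
  // avoid shifting by the full width.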

  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();

  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
  CountZerosUndefActions.lower();
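  // The ZERO_UNDEF variants are always lowered to the plain ops; with Zbb
  // that is safe because clz/ctz are fully defined for a zero input (they
  // return the operand width).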

  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }

  auto &ConstantActions = getActionDefinitionsBuilder(G_CONSTANT);
  ConstantActions.legalFor({s32, p0});
  if (ST.is64Bit())
    ConstantActions.customFor({s64});
  ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({s32, sXLen, p0})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .widenScalarToNextPow2(1)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);

  auto &SelectActions = getActionDefinitionsBuilder(G_SELECT).legalFor(
      {{s32, sXLen}, {p0, sXLen}});
  if (XLen == 64 || ST.hasStdExtD())
    SelectActions.legalFor({{s64, sXLen}});
  SelectActions.widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);

  auto &LoadStoreActions =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                     {s32, p0, s16, 16},
                                     {s32, p0, s32, 32},
                                     {p0, p0, sXLen, XLen}});
  auto &ExtLoadActions =
      getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
  if (XLen == 64) {
    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
                                               {s64, p0, s16, 16},
                                               {s64, p0, s32, 32},
                                               {s64, p0, s64, 64}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
  } else if (ST.hasStdExtD()) {
    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
  }
  LoadStoreActions.clampScalar(0, s32, sXLen).lower();
  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();

  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
      .legalFor({p0});

  if (ST.hasStdExtM() || ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .legalFor({s32, sXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, s32, sXLen);

    // clang-format off
    getActionDefinitionsBuilder({G_SMULH, G_UMULH})
        .legalFor({sXLen})
        .lower();
    // clang-format on

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});

    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .minScalar(0, sXLen)
        // Widen sXLen to sDoubleXLen so we can use a single libcall to get
        // the low bits for the mul result and high bits to do the overflow
        // check.
        .widenScalarIf(typeIs(0, sXLen),
                       LegalizeMutations::changeTo(0, sDoubleXLen))
        .lower();
  }
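  // Concretely, on RV32 without M an s32 G_SMULO is first widened to s64 and
  // then lowered, so a single __muldi3 libcall (the compiler-rt name for a
  // 64-bit multiply on 32-bit targets) yields both the low product bits and
  // the high bits needed for the overflow check.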

  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .legalFor({s32, sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  }

  auto &AbsActions = getActionDefinitionsBuilder(G_ABS);
  if (ST.hasStdExtZbb())
    AbsActions.customFor({s32, sXLen}).minScalar(0, sXLen);
  AbsActions.lower();

  auto &MinMaxActions =
      getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN});
  if (ST.hasStdExtZbb())
    MinMaxActions.legalFor({sXLen}).minScalar(0, sXLen);
  MinMaxActions.lower();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower();

  // FP Operations

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FNEG,
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM})
      .legalIf(typeIsScalarFPArith(0, ST));

  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalIf(all(typeIsScalarFPArith(0, ST), typeIsScalarFPArith(1, ST)));

  getActionDefinitionsBuilder(G_FPTRUNC).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s32)(Query) &&
                typeIs(1, s64)(Query));
      });
  getActionDefinitionsBuilder(G_FPEXT).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s64)(Query) &&
                typeIs(1, s32)(Query));
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalIf(all(typeIs(0, sXLen), typeIsScalarFPArith(1, ST)))
      .clampScalar(0, sXLen, sXLen);

  // TODO: Support vector version of G_IS_FPCLASS.
  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customIf(all(typeIs(0, s1), typeIsScalarFPArith(1, ST)));

  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalIf(typeIsScalarFPArith(0, ST))
      .lowerFor({s32, s64});

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalIf(all(typeInSet(0, {s32, sXLen}), typeIsScalarFPArith(1, ST)))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalIf(all(typeIsScalarFPArith(0, ST), typeInSet(1, {s32, sXLen})))
      .widenScalarToNextPow2(1)
      .clampScalar(1, s32, sXLen);

  // FIXME: We can do custom inline expansion like SelectionDAG.
  // FIXME: Legal with Zfa.
  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_VASTART).customFor({p0});

  // va_list must be a pointer, but most sized types are pretty easy to handle
  // as the destination.
  getActionDefinitionsBuilder(G_VAARG)
      // TODO: Implement narrowScalar and widenScalar for G_VAARG for types
      // outside the [s32, sXLen] range.
      .clampScalar(0, s32, sXLen)
      .lowerForCartesianProduct({s32, sXLen, p0}, {p0});

  getLegacyLegalizerInfo().computeTables();
}

static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
  if (Ty.isVector())
    return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                                Ty.getNumElements());
  return IntegerType::get(C, Ty.getSizeInBits());
}

bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
  switch (IntrinsicID) {
  default:
    return false;
  case Intrinsic::vacopy: {
    // vacopy arguments must be legal because of the intrinsic signature.
    // No need to check here.

    MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
    MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
    MachineFunction &MF = *MI.getMF();
    const DataLayout &DL = MIRBuilder.getDataLayout();
    LLVMContext &Ctx = MF.getFunction().getContext();

    Register DstLst = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(DstLst);

    // Load the source va_list
    Align Alignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
    MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);

    // Store the loaded source value in the destination va_list. Note the
    // buildStore operand order is (Val, Addr, MMO).
    MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
        MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  }
}

bool RISCVLegalizerInfo::legalizeShlAshrLshr(
    MachineInstr &MI, MachineIRBuilder &MIRBuilder,
    GISelChangeObserver &Observer) const {
  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  // If the shift amount is a G_CONSTANT, promote it to a 64-bit type so the
  // imported patterns can select it later. Either way, it will be legal.
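  // Illustrative MIR, assuming a constant amount of 5:
  //   %amt:_(s32) = G_CONSTANT i32 5
  //   %r:_(s32) = G_SHL %x:_(s32), %amt(s32)
  // becomes
  //   %amt64:_(s64) = G_CONSTANT i64 5
  //   %r:_(s32) = G_SHL %x:_(s32), %amt64(s64)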
  Register AmtReg = MI.getOperand(2).getReg();
  auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI);
  if (!VRegAndVal)
    return true;
  // Check the shift amount is in range for an immediate form.
  uint64_t Amount = VRegAndVal->Value.getZExtValue();
  if (Amount > 31)
    return true; // This will have to remain a register variant.
  auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
  Observer.changingInstr(MI);
  MI.getOperand(2).setReg(ExtCst.getReg(0));
  Observer.changedInstr(MI);
  return true;
}

bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
                                         MachineIRBuilder &MIRBuilder) const {
  // Store the address of the VarArgsFrameIndex slot into the memory location
  // pointed to by the G_VASTART operand.
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
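  // The result is roughly (illustrative MIR):
  //   %fi:_(p0) = G_FRAME_INDEX %stack.va
  //   G_STORE %fi(p0), %valist(p0) :: (store (p0))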
  MachineFunction *MF = MI.getParent()->getParent();
  RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
  int FI = FuncInfo->getVarArgsFrameIndex();
  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
  assert(MI.hasOneMemOperand());
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
  return true;
}

bool RISCVLegalizerInfo::shouldBeInConstantPool(APInt APImm,
                                                bool ShouldOptForSize) const {
  assert(APImm.getBitWidth() == 32 || APImm.getBitWidth() == 64);
  int64_t Imm = APImm.getSExtValue();
  // All simm32 constants should be handled by isel.
  // NOTE: The getMaxBuildIntsCost call below should return a value >= 2 making
  // this check redundant, but small immediates are common so this check
  // should have better compile time.
  if (isInt<32>(Imm))
    return false;

  // We only need to cost the immediate if constant pool lowering is enabled.
  if (!STI.useConstantPoolForLargeInts())
    return false;

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
  if (Seq.size() <= STI.getMaxBuildIntsCost())
    return false;

  // Optimizations below are disabled for opt size. If we're optimizing for
  // size, use a constant pool.
  if (ShouldOptForSize)
    return true;

  // Special case: see if we can build the constant as (ADD (SLLI X, C), X);
  // do that if it will avoid a constant pool. It will require an extra
  // temporary register though.
  // If we have Zba we can use (ADD_UW X, (SLLI X, 32)) to handle cases where
  // the low and high 32 bits are the same and bits 31 and 63 are set.
  unsigned ShiftAmt, AddOpc;
  RISCVMatInt::InstSeq SeqLo =
      RISCVMatInt::generateTwoRegInstSeq(Imm, STI, ShiftAmt, AddOpc);
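  // Use the constant pool unless the two-register sequence exists and its
  // length plus the SLLI and ADD stays within the subtarget's build-int
  // budget; hence the negated test below.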
  return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
}

bool RISCVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  GISelChangeObserver &Observer = Helper.Observer;
  MachineFunction &MF = *MI.getParent()->getParent();
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_ABS:
    return Helper.lowerAbsToMaxNeg(MI);
  // TODO: G_FCONSTANT
  case TargetOpcode::G_CONSTANT: {
    const Function &F = MF.getFunction();
    // TODO: if PSI and BFI are present, add " ||
    // llvm::shouldOptForSize(*CurMBB, PSI, BFI)".
    bool ShouldOptForSize = F.hasOptSize() || F.hasMinSize();
    const ConstantInt *ConstVal = MI.getOperand(1).getCImm();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
      return true;
    return Helper.lowerConstant(MI);
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MIRBuilder, Observer);
  case TargetOpcode::G_SEXT_INREG: {
    // Source size of 32 is sext.w.
    int64_t SizeInBits = MI.getOperand(2).getImm();
    if (SizeInBits == 32)
      return true;

    return Helper.lower(MI, 0, /* Unused hint type */ LLT()) ==
           LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    const MachineOperand &ImmOp = MI.getOperand(2);
    MachineIRBuilder MIB(MI);

    // Map LLVM IR's floating-point class mask onto RISC-V's fclass layout by
    // rotating the 10-bit immediate right by two bits.
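    // The rotation works because LLVM IR orders the ten tests {sNaN, qNaN,
    // -inf, -normal, -subnormal, -0, +0, +subnormal, +normal, +inf} from bit
    // 0 upward, while the fclass result starts at -inf and puts the two NaN
    // tests in bits 8 and 9; rotr(2) lines the two encodings up exactly.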
    APInt GFpClassImm(10, static_cast<uint64_t>(ImmOp.getImm()));
    auto FClassMask = MIB.buildConstant(sXLen, GFpClassImm.rotr(2).zext(XLen));
    auto ConstZero = MIB.buildConstant(sXLen, 0);

    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    MIB.buildICmp(CmpInst::ICMP_NE, GISFPCLASS, And, ConstZero);

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}