using namespace LegalityPredicates;
using namespace LegalizeMutations;
static LegalityPredicate
typeIsLegalIntOrFPVec(unsigned TypeIdx,
                      std::initializer_list<LLT> IntOrFPVecTys,
                      const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
            ST.hasVInstructionsI64()) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, IntOrFPVecTys), P);
}
static LegalityPredicate typeIsLegalBoolVec(unsigned TypeIdx,
                                            std::initializer_list<LLT> BoolVecTys,
                                            const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64);
  };
  return all(typeInSet(TypeIdx, BoolVecTys), P);
}
static LegalityPredicate typeIsLegalPtrVec(unsigned TypeIdx,
                                           std::initializer_list<LLT> PtrVecTys,
                                           const RISCVSubtarget &ST) {
  LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
    return ST.hasVInstructions() &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
            ST.getELen() == 64) &&
           (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
            Query.Types[TypeIdx].getScalarSizeInBits() == 32);
  };
  return all(typeInSet(TypeIdx, PtrVecTys), P);
}
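// The three predicates above combine a type-set membership test with
// subtarget checks via all(typeInSet(TypeIdx, Tys), P): a vector type is only
// reported legal when vector instructions are present, 64-bit elements
// additionally require hasVInstructionsI64(), and single-element (smallest
// LMUL) types require ELEN == 64.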
RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  // ... (LLT shorthands are defined here in the full source: scalars such as
  // s16/s32/s64/s128, sDoubleXLen, the pointer type p0, and the scalable
  // vector types nxv<N>s<W> / nxv<N>p0)
  using namespace TargetOpcode;

  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                        nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
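  // Naming note: nxv<N>s<W> abbreviates LLT::scalable_vector(N, W), i.e. the
  // scalable type <vscale x N x iW>, and nxv<N>p0 is the pointer-element
  // equivalent; the lists enumerate the element counts RVV can hold per
  // register group for each element width.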
  // ... (basic integer op rules elided in this excerpt)
  getActionDefinitionsBuilder({G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();
  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
      .legalFor({{sXLen, sXLen}})
      .customFor(ST.is64Bit(), {{s32, s32}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);
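  // Note the two-argument rule forms used throughout this file:
  // legalFor(Pred, Types) / customFor(Pred, Types) only register Types when
  // Pred is true, so each subtarget-feature combination stays on one line.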
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalFor({{s32, s16}})
      .legalFor(ST.is64Bit(), {{s64, s16}, {s64, s32}})
      .customIf(typeIsLegalBoolVec(1, BoolVecTys, ST))
      .maxScalar(0, sXLen);
  getActionDefinitionsBuilder(G_SEXT_INREG)
      .customFor({sXLen})
      .clampScalar(0, sXLen, sXLen)
      .lower();
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }
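  // On RV32 with the D extension, an s64 value can be carried in an FPR, so
  // merging/unmerging one s64 to/from two s32 halves stays legal in the loop
  // above; all other cases are widened or clamped to XLen-sized scalars.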
  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();
  getActionDefinitionsBuilder({G_ROTR, G_ROTL})
      .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), {{sXLen, sXLen}})
      .customFor(ST.is64Bit() && (ST.hasStdExtZbb() || ST.hasStdExtZbkb()),
                 {{s32, s32}})
      .lower();
  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();
  getActionDefinitionsBuilder(G_BITCAST).legalIf(
      all(LegalityPredicates::any(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                                  typeIsLegalBoolVec(0, BoolVecTys, ST)),
          LegalityPredicates::any(typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST),
                                  typeIsLegalBoolVec(1, BoolVecTys, ST))));
  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();
  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{sXLen, sXLen}})
        .customFor({{s32, s32}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
  CountZerosUndefActions.lower();
  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{sXLen, sXLen}})
        .clampScalar(0, sXLen, sXLen)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }
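  // Zbb supplies clz/ctz/cpop (plus the *W forms used by the custom s32
  // lowering on RV64); without Zbb these ops fall back to generic expansion.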
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0})
      .legalFor(!ST.is64Bit(), {s32})
      .customFor(ST.is64Bit(), {s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);
  getActionDefinitionsBuilder(G_FREEZE)
      .legalFor({s16, s32, p0})
      .legalFor(ST.is64Bit(), {s64})
      .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s16, sXLen);
  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
      .legalFor({s32, sXLen, p0})
      .legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
      .legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);
  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .legalIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                   typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)))
      .widenScalarOrEltToNextPow2OrMinSize(1, 8)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);
  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, sXLen}, {p0, sXLen}})
      .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                   typeIsLegalBoolVec(1, BoolVecTys, ST)))
      .legalFor(XLen == 64 || ST.hasStdExtD(), {{s64, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);
  auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
  auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
  auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});

  // Return the alignment needed for scalar memory ops. If unaligned scalar
  // memory is supported, byte alignment is always enough.
  auto getScalarMemAlign = [&ST](unsigned Size) {
    return ST.enableUnalignedScalarMem() ? 8 : Size;
  };
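  // In the memory-descriptor rows below the last column is the required
  // alignment in bits: normally the access size, but relaxed to 8 (byte
  // alignment) when the subtarget tolerates unaligned scalar accesses.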
  LoadActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  StoreActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  ExtLoadActions.legalForTypesWithMemDesc(
      {{sXLen, p0, s8, getScalarMemAlign(8)},
       {sXLen, p0, s16, getScalarMemAlign(16)}});
  if (XLen == 64) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s32, getScalarMemAlign(32)}});
  } else if (ST.hasStdExtD()) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
  }
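  // With D on RV32 (the else-if branch above), s64 loads and stores stay
  // legal even though s64 is not a legal scalar integer type there: the
  // value can live in an FPR via fld/fsd.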
  // Vector loads/stores.
  if (ST.hasVInstructions()) {
    LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                          {nxv4s8, p0, nxv4s8, 8},
                                          {nxv8s8, p0, nxv8s8, 8},
                                          {nxv16s8, p0, nxv16s8, 8},
                                          {nxv32s8, p0, nxv32s8, 8},
                                          {nxv64s8, p0, nxv64s8, 8},
                                          {nxv2s16, p0, nxv2s16, 16},
                                          {nxv4s16, p0, nxv4s16, 16},
                                          {nxv8s16, p0, nxv8s16, 16},
                                          {nxv16s16, p0, nxv16s16, 16},
                                          {nxv32s16, p0, nxv32s16, 16},
                                          {nxv2s32, p0, nxv2s32, 32},
                                          {nxv4s32, p0, nxv4s32, 32},
                                          {nxv8s32, p0, nxv8s32, 32},
                                          {nxv16s32, p0, nxv16s32, 32}});
    StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                           {nxv4s8, p0, nxv4s8, 8},
                                           {nxv8s8, p0, nxv8s8, 8},
                                           {nxv16s8, p0, nxv16s8, 8},
                                           {nxv32s8, p0, nxv32s8, 8},
                                           {nxv64s8, p0, nxv64s8, 8},
                                           {nxv2s16, p0, nxv2s16, 16},
                                           {nxv4s16, p0, nxv4s16, 16},
                                           {nxv8s16, p0, nxv8s16, 16},
                                           {nxv16s16, p0, nxv16s16, 16},
                                           {nxv32s16, p0, nxv32s16, 16},
                                           {nxv2s32, p0, nxv2s32, 32},
                                           {nxv4s32, p0, nxv4s32, 32},
                                           {nxv8s32, p0, nxv8s32, 32},
                                           {nxv16s32, p0, nxv16s32, 32}});
    if (ST.getELen() == 64) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                            {nxv1s16, p0, nxv1s16, 16},
                                            {nxv1s32, p0, nxv1s32, 32}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                             {nxv1s16, p0, nxv1s16, 16},
                                             {nxv1s32, p0, nxv1s32, 32}});
    }
    if (ST.hasVInstructionsI64()) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                            {nxv2s64, p0, nxv2s64, 64},
                                            {nxv4s64, p0, nxv4s64, 64},
                                            {nxv8s64, p0, nxv8s64, 64}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                             {nxv2s64, p0, nxv2s64, 64},
                                             {nxv4s64, p0, nxv4s64, 64},
                                             {nxv8s64, p0, nxv8s64, 64}});
    }
    // Pointer vectors need XLen-sized elements to be legal; their loads and
    // stores are custom-legalized.
    if (XLen <= ST.getELen()) {
      LoadActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
      StoreActions.customIf(typeIsLegalPtrVec(0, PtrVecTys, ST));
    }
  }
  LoadActions.widenScalarToNextPow2(0, 8)
      .lowerIfMemSizeNotByteSizePow2()
      .clampScalar(0, s16, sXLen)
      .lower();
  StoreActions
      .clampScalar(0, s16, sXLen)
      .lowerIfMemSizeNotByteSizePow2()
      .lower();

  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, sXLen, sXLen).lower();
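  // Extending loads are first widened and clamped to sXLen; any remaining
  // illegal G_SEXTLOAD/G_ZEXTLOAD is lowered to a plain load plus an extend.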
  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).customFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});
  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
      .legalFor({p0});
  if (ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .legalFor({sXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH})
        .legalFor({sXLen})
        .lower();

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});
    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .minScalar(0, sXLen)
        // Widen sXLen to sDoubleXLen so a single libcall yields both the low
        // bits of the product and the high bits for the overflow check.
        .widenScalarIf(typeIs(0, sXLen), changeTo(0, sDoubleXLen))
        .lower();
  }
  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_UREM})
        .legalFor({sXLen})
        .customFor(ST.is64Bit(), {s32})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
    getActionDefinitionsBuilder(G_SREM)
        .legalFor({sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  }
  getActionDefinitionsBuilder({G_SDIVREM, G_UDIVREM}).lower();
  getActionDefinitionsBuilder(G_ABS)
      .customFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();
  getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN})
      .legalFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)
      .lower();
  getActionDefinitionsBuilder({G_SCMP, G_UCMP}).lower();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
      .lower();
  getActionDefinitionsBuilder(
      {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT, G_FMAXNUM, G_FMINNUM})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
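  // Each FP scalar type above is gated on its extension (F for s32, D for
  // s64, Zfh for s16); anything left over, including s128 on RV64, goes to
  // libcalls.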
  getActionDefinitionsBuilder({G_FNEG, G_FABS})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});
  getActionDefinitionsBuilder(G_FREM)
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalFor(ST.hasStdExtF(), {{s32, s32}})
      .legalFor(ST.hasStdExtD(), {{s64, s64}, {s32, s64}, {s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s16}, {s16, s32}, {s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}, {s64, s16}})
      .lower();
  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(ST.hasStdExtD(), {{s32, s64}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s32}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}})
      .libcallFor({{s32, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}});
  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(ST.hasStdExtD(), {{s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s64, s16}})
      .libcallFor({{s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}});
  getActionDefinitionsBuilder(G_FCMP)
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .clampScalar(0, sXLen, sXLen)
      .libcallFor({{sXLen, s32}, {sXLen, s64}})
      .libcallFor(ST.is64Bit(), {{sXLen, s128}});
  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customFor(ST.hasStdExtF(), {{s1, s32}})
      .customFor(ST.hasStdExtD(), {{s1, s64}})
      .customFor(ST.hasStdExtZfh(), {{s1, s16}})
      .lowerFor({{s1, s32}, {s1, s64}});
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .customFor(ST.is64Bit() && ST.hasStdExtF(), {{s32, s32}})
      .customFor(ST.is64Bit() && ST.hasStdExtD(), {{s32, s64}})
      .customFor(ST.is64Bit() && ST.hasStdExtZfh(), {{s32, s16}})
      .widenScalarToNextPow2(0)
      .minScalar(0, s32)
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}, {s128, s128}});
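  // On RV64, s32-result conversions are custom so they can use the W-form
  // fcvt instructions (see getRISCVWOpcode below), which sign-extend their
  // 32-bit result to 64 bits per the RV64 convention.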
  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalFor(ST.hasStdExtF(), {{s32, sXLen}})
      .legalFor(ST.hasStdExtD(), {{s64, sXLen}})
      .legalFor(ST.hasStdExtZfh(), {{s16, sXLen}})
      .widenScalarToNextPow2(1)
      // Promote the integer operand to XLen when that makes the operation
      // legal for the FP result type.
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0].isScalar() && Query.Types[1].isScalar() &&
                   (Query.Types[1].getSizeInBits() < ST.getXLen()) &&
                   ((ST.hasStdExtF() && Query.Types[0].getSizeInBits() == 32) ||
                    (ST.hasStdExtD() && Query.Types[0].getSizeInBits() == 64) ||
                    (ST.hasStdExtZfh() &&
                     Query.Types[0].getSizeInBits() == 16));
          },
          changeTo(1, sXLen))
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}, {s128, s128}});
  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
                               G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
                               G_INTRINSIC_ROUNDEVEN})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16});
  getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2,
                               G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS,
                               G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH,
                               G_FTANH})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP})
      .libcallFor({{s32, s32}, {s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}});
  getActionDefinitionsBuilder(G_VASTART).customFor({p0});
  getActionDefinitionsBuilder(G_VAARG)
      .clampScalar(0, sXLen, sXLen)
      .lowerForCartesianProduct({sXLen, p0}, {p0});
  getActionDefinitionsBuilder(G_VSCALE)
      .clampScalar(0, sXLen, sXLen)
      .customFor({sXLen});
  auto &SplatActions =
      getActionDefinitionsBuilder(G_SPLAT_VECTOR)
          .legalIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                       typeIs(1, sXLen)))
          .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST), typeIs(1, s1)));
  // Handle s64 element vectors on RV32: with D the splat can go through an
  // FPR, otherwise the s64 is split and splatted via its two s32 halves.
  if (XLen == 32) {
    if (ST.hasVInstructionsF64() && ST.hasStdExtD())
      SplatActions.legalIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
    else if (ST.hasVInstructionsI64())
      SplatActions.customIf(all(
          typeInSet(0, {nxv1s64, nxv2s64, nxv4s64, nxv8s64}), typeIs(1, s64)));
  }
  SplatActions.clampScalar(1, sXLen, sXLen);
  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
      // We can't slide mask vectors indexed by their i1 elements; the
      // smallest granule is i8, so bitcast to an equivalent i8 vector first.
      .bitcastIf(
          all(typeIsLegalBoolVec(0, BoolVecTys, ST),
              typeIsLegalBoolVec(1, BoolVecTys, ST)),
          [=](const LegalityQuery &Query) {
            LLT CastTy = LLT::vector(
                Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
            return std::pair(0, CastTy);
          })
      .customIf(LegalityPredicates::any(
          typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
          typeIsLegalBoolVec(0, BoolVecTys, ST)));
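  // The bitcast above keeps the total vector size: e.g. an nxv64s1 mask
  // becomes nxv8s8 (element count divided by 8, element width widened to
  // 8 bits), a form the slide-based lowering can handle.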
  getActionDefinitionsBuilder(G_INSERT_SUBVECTOR)
      .customIf(all(typeIsLegalBoolVec(0, BoolVecTys, ST),
                    typeIsLegalBoolVec(1, BoolVecTys, ST)))
      .customIf(all(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
                    typeIsLegalIntOrFPVec(1, IntOrFPVecTys, ST)));
  getLegacyLegalizerInfo().computeTables();
}
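// Note: computeTables() above bakes the accumulated legacy rules into lookup
// tables, so it must come after every getActionDefinitionsBuilder() call in
// the constructor.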
bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                           MachineInstr &MI) const {
  Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
  switch (IntrinsicID) {
  default:
    return false;
  case Intrinsic::vacopy: {
    // vacopy arguments are legal by construction; copy the source va_list by
    // loading it and storing it into the destination.
    MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
    MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
    Register DstLst = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(DstLst);

    // ... (MachineMemOperand creation for LoadMMO/StoreMMO elided)
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);

    MI.eraseFromParent();
    return true;
  }
  }
}
bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
                                         MachineIRBuilder &MIRBuilder) const {
  // Store the address of the VarArgsFrameIndex slot into the va_list.
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
  MachineFunction *MF = MI.getParent()->getParent();
  RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
  int FI = FuncInfo->getVarArgsFrameIndex();
  LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
  auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
  assert(MI.hasOneMemOperand());
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
  return true;
}
bool RISCVLegalizerInfo::legalizeBRJT(MachineInstr &MI,
                                      MachineIRBuilder &MIRBuilder) const {
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  auto &MF = *MI.getParent()->getParent();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  const DataLayout &DL = MIRBuilder.getDataLayout();

  Register PtrReg = MI.getOperand(0).getReg();
  LLT PtrTy = MRI.getType(PtrReg);
  Register IndexReg = MI.getOperand(2).getReg();
  LLT IndexTy = MRI.getType(IndexReg);

  // Scale the index by the size of a jump-table entry.
  auto ShiftAmt =
      MIRBuilder.buildConstant(IndexTy, Log2_32(MJTI->getEntrySize(DL)));
  IndexReg = MIRBuilder.buildShl(IndexTy, IndexReg, ShiftAmt).getReg(0);

  // ... (address computation and per-entry-kind handling elided; label
  // difference entries use a sign-extending load on RV64)
  unsigned LoadOpc =
      STI.is64Bit() ? TargetOpcode::G_SEXTLOAD : TargetOpcode::G_LOAD;
  // ...

  MI.eraseFromParent();
  return true;
}
bool RISCVLegalizerInfo::shouldBeInConstantPool(const APInt &APImm,
                                                bool ShouldOptForSize) const {
  assert(APImm.getBitWidth() == 32 || APImm.getBitWidth() == 64);
  int64_t Imm = APImm.getSExtValue();
  // ... (constants that materialize in few instructions, per
  // RISCVMatInt::generateInstSeq, are kept out of the pool; elided)

  // When optimizing for size, a long materialization sequence loses to a
  // constant-pool load.
  if (ShouldOptForSize)
    return true;

  // Prefer a two-register materialization sequence when it is cheap enough.
  unsigned ShiftAmt, AddOpc;
  RISCVMatInt::InstSeq SeqLo =
      RISCVMatInt::generateTwoRegInstSeq(Imm, STI, ShiftAmt, AddOpc);
  return !(!SeqLo.empty() && (SeqLo.size() + 2) <= STI.getMaxBuildIntsCost());
}
bool RISCVLegalizerInfo::legalizeVScale(MachineInstr &MI,
                                        MachineIRBuilder &MIB) const {
  const LLT XLenTy(STI.getXLenVT());
  Register Dst = MI.getOperand(0).getReg();

  // LLVM models RVV types with a 64-bit minimum block (RVVBitsPerBlock), and
  // VLENB gives the register length in bytes, so vscale == VLENB / 8. Pick
  // the cheapest shift or multiply form for G_VSCALE's multiplier Val.
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
  if (isPowerOf2_64(Val)) {
    uint64_t Log2 = Log2_64(Val);
    if (Log2 < 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildLShr(Dst, VLENB, MIB.buildConstant(XLenTy, 3 - Log2));
    } else if (Log2 > 3) {
      auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildShl(Dst, VLENB, MIB.buildConstant(XLenTy, Log2 - 3));
    } else {
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
    }
  } else if ((Val % 8) == 0) {
    // A multiple of 8 folds the /8 into the multiplier.
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    MIB.buildMul(Dst, VLENB, MIB.buildConstant(XLenTy, Val / 8));
  } else {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    auto VScale = MIB.buildLShr(XLenTy, VLENB, MIB.buildConstant(XLenTy, 3));
    MIB.buildMul(Dst, VScale, MIB.buildConstant(XLenTy, Val));
  }
  MI.eraseFromParent();
  return true;
}
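// Worked example for the function above: for G_VSCALE 4, Log2 == 2 < 3, so
// the result is VLENB >> 1; for G_VSCALE 24, 24 % 8 == 0 and the result is
// VLENB * 3. Both follow from vscale == VLENB / 8.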
bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
                                     MachineIRBuilder &MIB) const {
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();

  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
  LLT DstEltTy = DstTy.getElementType();
  auto SplatZero = MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, 0));
  auto SplatTrue =
      MIB.buildSplatVector(DstTy, MIB.buildConstant(DstEltTy, ExtTrueVal));
  MIB.buildSelect(Dst, Src, SplatTrue, SplatZero);

  MI.eraseFromParent();
  return true;
}
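// For example, G_SEXT from nxv4s1 to nxv4s32 above becomes
//   select(mask, splat(-1), splat(0)),
// while G_ZEXT and G_ANYEXT use splat(1) as the true value instead.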
905 "Machine instructions must be Load/Store.");
912 LLT DataTy =
MRI.getType(DstReg);
916 if (!
MI.hasOneMemOperand())
924 if (TLI->allowsMemoryAccessForAlignment(Ctx,
DL, VT, *MMO))
928 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
929 "Unexpected unaligned RVV load type");
932 unsigned NumElements =
// Creates an all-ones mask suitable for masking a vector of type VecTy with
// vector length VL. (The getMaskTypeFor helper it relies on is elided here.)
static MachineInstrBuilder buildAllOnesMask(LLT VecTy, const SrcOp &VL,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  LLT MaskTy = getMaskTypeFor(VecTy);
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});
}
// Gets the two common "VL" operands: an all-ones mask and the vector length.
static std::pair<MachineInstrBuilder, MachineInstrBuilder>
buildDefaultVLOps(LLT VecTy, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  assert(VecTy.isScalableVector() && "Expecting scalable container type");
  const RISCVSubtarget &STI = MIB.getMF().getSubtarget<RISCVSubtarget>();
  LLT XLenTy(STI.getXLenVT());
  auto VL = MIB.buildConstant(XLenTy, -1);
  auto Mask = buildAllOnesMask(VecTy, VL, MIB, MRI);
  return {Mask, VL};
}

static MachineInstrBuilder
buildSplatPartsS64WithVL(const DstOp &Dst, const SrcOp &Passthru, Register Lo,
                         Register Hi, const SrcOp &VL, MachineIRBuilder &MIB,
                         MachineRegisterInfo &MRI) {
  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});
}

// On RV32, split an s64 splat value into its two 32-bit halves.
static MachineInstrBuilder
buildSplatSplitS64WithVL(const DstOp &Dst, const SrcOp &Passthru,
                         const SrcOp &Scalar, const SrcOp &VL,
                         MachineIRBuilder &MIB, MachineRegisterInfo &MRI) {
  auto Unmerge = MIB.buildUnmerge(LLT::scalar(32), Scalar);
  return buildSplatPartsS64WithVL(Dst, Passthru, Unmerge.getReg(0),
                                  Unmerge.getReg(1), VL, MIB, MRI);
}
bool RISCVLegalizerInfo::legalizeSplatVector(MachineInstr &MI,
                                             MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);

  MachineRegisterInfo &MRI = *MIB.getMRI();
  Register Dst = MI.getOperand(0).getReg();
  Register SplatVal = MI.getOperand(1).getReg();

  LLT VecTy = MRI.getType(Dst);
  LLT XLenTy(STI.getXLenVT());

  // Handle s64 element vectors on RV32: split the scalar into two s32 halves
  // and splat those.
  if (XLenTy.getSizeInBits() == 32 &&
      VecTy.getElementType().getSizeInBits() == 64) {
    auto [_, VL] = buildDefaultVLOps(VecTy, MIB, MRI);
    buildSplatSplitS64WithVL(Dst, MIB.buildUndef(VecTy), SplatVal, VL, MIB,
                             MRI);
    MI.eraseFromParent();
    return true;
  }

  // All-ones and all-zeros i1 splats map directly onto vmset/vmclr.
  if (isAllOnesOrAllOnesSplat(*MRI.getVRegDef(SplatVal), MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }
  if (isNullOrNullSplat(*MRI.getVRegDef(SplatVal), MRI)) {
    auto VL = buildDefaultVLOps(VecTy, MIB, MRI).second;
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
    return true;
  }

  // A non-constant i1 splat is promoted through an s8 element vector:
  // zero-extend the splat value, splat it, and compare against zero.
  LLT InterEltTy = LLT::scalar(8);
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
  // ... (s8 splat and compare-to-mask steps elided in this excerpt)
  MI.eraseFromParent();
  return true;
}

static LLT getLMUL1Ty(LLT VecTy) {
  assert(VecTy.getElementType().getSizeInBits() <= 64 &&
         "Unexpected vector LLT");
  return LLT::scalable_vector(RISCV::RVVBitsPerBlock /
                                  VecTy.getElementType().getSizeInBits(),
                              VecTy.getElementType());
}
bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
                                                  MachineIRBuilder &MIB) const {
  MachineRegisterInfo &MRI = *MIB.getMRI();
  // ... (operand extraction of Dst, Src and the index Idx elided)

  LLT LitTy = MRI.getType(Dst);
  LLT BigTy = MRI.getType(Src);

  // Mask vectors can't be slid by their i1 elements; extend to an s8 vector,
  // extract there, and compare the result back down to a mask.
  if (LitTy.getElementType() == LLT::scalar(1)) {
    LLT ExtBigTy = BigTy.changeElementType(LLT::scalar(8));
    auto BigZExt = MIB.buildZExt(ExtBigTy, Src);
    // ... (extract from BigZExt and compare against zero elided)
    MI.eraseFromParent();
    return true;
  }

  // Decompose the extract into a subregister-aligned part plus a remainder.
  auto Decompose =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          getMVTForLLT(BigTy), getMVTForLLT(LitTy), Idx, STI.getRegisterInfo());
  unsigned RemIdx = Decompose.second;

  // A zero remainder means the extract aligns to a vector register and can
  // be done purely with subregister operations.
  if (RemIdx == 0)
    return true;

  // If BigTy is an LMUL-group type, first narrow to the nearest full vector
  // register (LMUL=1) type so the slide operates on a single register.
  LLT InterLitTy = BigTy;
  Register Vec = Src;
  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
                          getLMUL1Ty(BigTy).getSizeInBits())) {
    assert(Decompose.first != RISCV::NoSubRegister);
    InterLitTy = getLMUL1Ty(BigTy);
    Vec = MIB.buildExtractSubvector(InterLitTy, Src, Idx - RemIdx).getReg(0);
  }

  // Slide the vector down so the desired subvector starts at element 0.
  const LLT XLenTy(STI.getXLenVT());
  auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
  auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
  uint64_t Policy = RISCVII::TAIL_AGNOSTIC; // (policy value assumed here)
  auto Slidedown = MIB.buildInstr(
      RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
      {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
  MIB.buildExtractSubvector(Dst, Slidedown, 0);

  MI.eraseFromParent();
  return true;
}
bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
                                                 LegalizerHelper &Helper,
                                                 MachineIRBuilder &MIB) const {
  GInsertSubvector &IS = cast<GInsertSubvector>(MI);

  MachineRegisterInfo &MRI = *MIB.getMRI();

  Register Dst = IS.getReg(0);
  Register BigVec = IS.getBigVec();
  Register LitVec = IS.getSubVec();
  uint64_t Idx = IS.getIndexImm();

  LLT BigTy = MRI.getType(BigVec);
  LLT LitTy = MRI.getType(LitVec);

  if (Idx == 0 ||
      MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return true;

  // Mask vectors can't be slid up by their i1 elements; when both element
  // counts are multiples of 8, bitcast to equivalent i8 vectors instead.
  if (LitTy.getElementType() == LLT::scalar(1)) {
    auto BigTyMinElts = BigTy.getElementCount().getKnownMinValue();
    auto LitTyMinElts = LitTy.getElementCount().getKnownMinValue();
    if (BigTyMinElts >= 8 && LitTyMinElts >= 8)
      return Helper.bitcast(
          IS, 0,
          LLT::vector(BigTy.getElementCount().divideCoefficientBy(8), 8));
    // ... (promotion of narrower masks to s8 vectors elided)
  }

  unsigned SubRegIdx, RemIdx;
  std::tie(SubRegIdx, RemIdx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          getMVTForLLT(BigTy), getMVTForLLT(LitTy), Idx, STI.getRegisterInfo());

  TypeSize VecRegSize = TypeSize::getScalable(RISCV::RVVBitsPerBlock);
  bool ExactlyVecRegSized =
      STI.expandVScale(LitTy.getSizeInBits())
          .isKnownMultipleOf(STI.expandVScale(VecRegSize));

  // If the subvector is register-aligned and exactly register-group sized,
  // subregister operations suffice.
  if (RemIdx == 0 && ExactlyVecRegSized)
    return true;

  // Otherwise slide the subvector up into place inside the nearest full
  // vector register (LMUL=1) type.
  LLT InterLitTy = BigTy;
  unsigned AlignedIdx = Idx - RemIdx;
  Register AlignedExtract = BigVec;
  if (TypeSize::isKnownGT(BigTy.getSizeInBits(),
                          getLMUL1Ty(BigTy).getSizeInBits())) {
    InterLitTy = getLMUL1Ty(BigTy);
    // Extract a subvector equal to the nearest full vector register type.
    AlignedExtract =
        MIB.buildExtractSubvector(InterLitTy, BigVec, AlignedIdx).getReg(0);
  }
  auto Insert = MIB.buildInsertSubvector(InterLitTy, MIB.buildUndef(InterLitTy),
                                         LitVec, 0);

  auto [Mask, _] = buildDefaultVLOps(BigTy, MIB, MRI);
  const LLT XLenTy(STI.getXLenVT());
  auto VL = MIB.buildVScale(XLenTy, LitTy.getElementCount().getKnownMinValue());

  bool NeedInsertSubvec =
      TypeSize::isKnownGT(BigTy.getSizeInBits(), InterLitTy.getSizeInBits());
  Register InsertedDst =
      NeedInsertSubvec ? MRI.createGenericVirtualRegister(InterLitTy) : Dst;
  MachineInstrBuilder Inserted;
  if (RemIdx == 0) {
    // Inserting at element 0 can use a tail-undisturbed vmv.v.v.
    Inserted = MIB.buildInstr(RISCV::G_VMV_V_V_VL, {InsertedDst},
                              {AlignedExtract, Insert, VL});
  } else {
    auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);
    // The vector length is RemIdx + length(LitTy).
    VL = MIB.buildAdd(XLenTy, SlideupAmt, VL);
    uint64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
    Inserted =
        MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
                       {AlignedExtract, Insert, SlideupAmt, Mask, VL, Policy});
  }

  // If required, insert the result back into the correct vector register;
  // this should resolve to an INSERT_SUBREG.
  if (NeedInsertSubvec)
    MIB.buildInsertSubvector(Dst, BigVec, Inserted, AlignedIdx);

  MI.eraseFromParent();
  return true;
}
static unsigned getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case TargetOpcode::G_ASHR:
    return RISCV::G_SRAW;
  case TargetOpcode::G_LSHR:
    return RISCV::G_SRLW;
  case TargetOpcode::G_SHL:
    return RISCV::G_SLLW;
  case TargetOpcode::G_SDIV:
    return RISCV::G_DIVW;
  case TargetOpcode::G_UDIV:
    return RISCV::G_DIVUW;
  case TargetOpcode::G_UREM:
    return RISCV::G_REMUW;
  case TargetOpcode::G_ROTL:
    return RISCV::G_ROLW;
  case TargetOpcode::G_ROTR:
    return RISCV::G_RORW;
  case TargetOpcode::G_CTLZ:
    return RISCV::G_CLZW;
  case TargetOpcode::G_CTTZ:
    return RISCV::G_CTZW;
  case TargetOpcode::G_FPTOSI:
    return RISCV::G_FCVT_W_RV64;
  case TargetOpcode::G_FPTOUI:
    return RISCV::G_FCVT_WU_RV64;
  }
}
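// The RISCV::G_*W opcodes mirror RV64's W-suffixed instructions (sraw, divw,
// rorw, fcvt.w, ...), which operate on the low 32 bits and sign-extend the
// result to 64 bits; the custom lowerings below rewrite s32 ops into them.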
bool RISCVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
  MachineFunction &MF = *MI.getParent()->getParent();
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_ABS:
    return Helper.lowerAbsToMaxNeg(MI) == LegalizerHelper::Legalized;
  case TargetOpcode::G_CONSTANT: {
    const Function &F = MF.getFunction();
    bool ShouldOptForSize = F.hasOptSize() || F.hasMinSize();
    const ConstantInt *ConstVal = MI.getOperand(1).getCImm();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
      return true;
    return Helper.lowerConstant(MI) == LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_SEXT_INREG: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    int64_t SizeInBits = MI.getOperand(2).getImm();
    // A 32-bit source on RV64 is handled by sext.w.
    if (DstTy.getSizeInBits() == 64 && SizeInBits == 32)
      return true;
    // Zbb provides sext.b and sext.h.
    if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))
      return true;
    return Helper.lower(MI, 0, /*LowerHintTy=*/LLT()) ==
           LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {
    if (getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI)) {
      // Shift by constant doesn't need the W node; just widen the value
      // operand to match the shift semantics and zero-extend the amount.
      unsigned ExtOpc = TargetOpcode::G_ANYEXT;
      if (MI.getOpcode() == TargetOpcode::G_ASHR)
        ExtOpc = TargetOpcode::G_SEXT;
      else if (MI.getOpcode() == TargetOpcode::G_LSHR)
        ExtOpc = TargetOpcode::G_ZEXT;
      Helper.Observer.changingInstr(MI);
      Helper.widenScalarSrc(MI, sXLen, 1, ExtOpc);
      Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ZEXT);
      Helper.widenScalarDst(MI, sXLen);
      Helper.Observer.changedInstr(MI);
      return true;
    }
    // Otherwise rewrite the s32 shift as its sXLen W-form equivalent.
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_ROTL:
  case TargetOpcode::G_ROTR: {
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarSrc(MI, sXLen, 2, TargetOpcode::G_ANYEXT);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTTZ: {
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarSrc(MI, sXLen, 1, TargetOpcode::G_ANYEXT);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    Helper.Observer.changingInstr(MI);
    Helper.widenScalarDst(MI, sXLen);
    MI.setDesc(MIRBuilder.getTII().get(getRISCVWOpcode(MI.getOpcode())));
    // fcvt.w(u) takes a rounding mode; round towards zero matches fp-to-int
    // conversion semantics.
    MI.addOperand(MachineOperand::CreateImm(RISCVFPRndMode::RTZ));
    Helper.Observer.changedInstr(MI);
    return true;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    const MachineOperand &ImmOp = MI.getOperand(2);
    MachineIRBuilder MIB(MI);

    // Turn LLVM IR's floating point classes into RISC-V's fclass classes by
    // rotating the 10-bit immediate right by two bits.
    APInt GFpClassImm(10, static_cast<uint64_t>(ImmOp.getImm()));
    auto FClassMask = MIB.buildConstant(sXLen, GFpClassImm.rotr(2).zext(XLen));
    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    MIB.buildICmp(CmpInst::ICMP_NE, GISFPCLASS, And,
                  MIB.buildConstant(sXLen, 0));

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRJT:
    return legalizeBRJT(MI, MIRBuilder);
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return legalizeExtractSubvector(MI, MIRBuilder);
  case TargetOpcode::G_INSERT_SUBVECTOR:
    return legalizeInsertSubvector(MI, Helper, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}