using namespace LegalityPredicates;
using namespace LegalizeMutations;

    std::initializer_list<LLT> IntOrFPVecTys,
  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getScalarSizeInBits() != 64 ||
          ST.hasVInstructionsI64()) &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          ST.getELen() == 64);

  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          ST.getELen() == 64);

    std::initializer_list<LLT> PtrVecTys,
  return ST.hasVInstructions() &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
          ST.getELen() == 64) &&
         (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 16 ||
          Query.Types[TypeIdx].getScalarSizeInBits() == 32);
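// The predicate bodies above belong to typeIsLegalIntOrFPVec, typeIsLegalBoolVec,
// and typeIsLegalPtrVec: vector types are only considered when V instructions are
// available, 64-bit elements additionally require hasVInstructionsI64(),
// single-element (nxv1) types require ELEN == 64, and nxv16p0 is only allowed
// when pointers are 32 bits wide.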
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  using namespace TargetOpcode;

  auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};

  auto IntOrFPVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                        nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                        nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                        nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0, nxv16p0};
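  // With RVVBitsPerBlock == 64, these scalable LLTs span LMUL 1/8 through 8 for
  // each element width (e.g. nxv8s8 is one vector register, nxv64s8 an LMUL=8
  // register group); the pointer vectors use the p0 element type.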
  getActionDefinitionsBuilder(
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();

      .legalFor({{sXLen, sXLen}})
      .customFor(ST.is64Bit(), {{s32, s32}})
      .widenScalarToNextPow2(0)

  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalFor({{s32, s16}})
      .legalFor(ST.is64Bit(), {{s64, s16}, {s64, s32}})

  getActionDefinitionsBuilder(G_SEXT_INREG)
      .clampScalar(0, sXLen, sXLen)
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(

    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  getActionDefinitionsBuilder({G_ROTR, G_ROTL})
      .legalFor(ST.hasStdExtZbb() || ST.hasStdExtZbkb(), {{sXLen, sXLen}})
      .customFor(ST.is64Bit() && (ST.hasStdExtZbb() || ST.hasStdExtZbkb()),
  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

  getActionDefinitionsBuilder(G_BITCAST).legalIf(

  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();
  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{sXLen, sXLen}})
        .customFor({{s32, s32}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
  CountZerosUndefActions.lower();
  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{sXLen, sXLen}})
        .clampScalar(0, sXLen, sXLen)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor(!ST.is64Bit(), {s32})
      .customFor(ST.is64Bit(), {s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);
  getActionDefinitionsBuilder(G_FREEZE)
      .legalFor({s16, s32, p0})
      .legalFor(ST.is64Bit(), {s64})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s16, sXLen);

  getActionDefinitionsBuilder(
      {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
      .legalFor({s32, sXLen, p0})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);
  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .widenScalarOrEltToNextPow2OrMinSize(1, 8)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, sXLen}, {p0, sXLen}})
      .legalFor(XLen == 64 || ST.hasStdExtD(), {{s64, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);
  auto &LoadActions = getActionDefinitionsBuilder(G_LOAD);
  auto &StoreActions = getActionDefinitionsBuilder(G_STORE);
  auto &ExtLoadActions = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD});

  auto getScalarMemAlign = [&ST](unsigned Size) {
    return ST.enableUnalignedScalarMem() ? 8 : Size;
  };
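  // The last element of each memory-descriptor tuple below is the required
  // alignment in bits: when the subtarget allows unaligned scalar accesses,
  // byte alignment (8) is sufficient; otherwise natural alignment equal to the
  // access size is demanded.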
  LoadActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  StoreActions.legalForTypesWithMemDesc(
      {{s16, p0, s8, getScalarMemAlign(8)},
       {s32, p0, s8, getScalarMemAlign(8)},
       {s16, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s16, getScalarMemAlign(16)},
       {s32, p0, s32, getScalarMemAlign(32)},
       {p0, p0, sXLen, getScalarMemAlign(XLen)}});
  ExtLoadActions.legalForTypesWithMemDesc(
      {{sXLen, p0, s8, getScalarMemAlign(8)},
       {sXLen, p0, s16, getScalarMemAlign(16)}});

    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, getScalarMemAlign(8)},
         {s64, p0, s16, getScalarMemAlign(16)},
         {s64, p0, s32, getScalarMemAlign(32)},
         {s64, p0, s64, getScalarMemAlign(64)}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s32, getScalarMemAlign(32)}});
  } else if (ST.hasStdExtD()) {
    LoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
    StoreActions.legalForTypesWithMemDesc(
        {{s64, p0, s64, getScalarMemAlign(64)}});
  if (ST.hasVInstructions()) {
    LoadActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                          {nxv4s8, p0, nxv4s8, 8},
                                          {nxv8s8, p0, nxv8s8, 8},
                                          {nxv16s8, p0, nxv16s8, 8},
                                          {nxv32s8, p0, nxv32s8, 8},
                                          {nxv64s8, p0, nxv64s8, 8},
                                          {nxv2s16, p0, nxv2s16, 16},
                                          {nxv4s16, p0, nxv4s16, 16},
                                          {nxv8s16, p0, nxv8s16, 16},
                                          {nxv16s16, p0, nxv16s16, 16},
                                          {nxv32s16, p0, nxv32s16, 16},
                                          {nxv2s32, p0, nxv2s32, 32},
                                          {nxv4s32, p0, nxv4s32, 32},
                                          {nxv8s32, p0, nxv8s32, 32},
                                          {nxv16s32, p0, nxv16s32, 32}});
    StoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
                                           {nxv4s8, p0, nxv4s8, 8},
                                           {nxv8s8, p0, nxv8s8, 8},
                                           {nxv16s8, p0, nxv16s8, 8},
                                           {nxv32s8, p0, nxv32s8, 8},
                                           {nxv64s8, p0, nxv64s8, 8},
                                           {nxv2s16, p0, nxv2s16, 16},
                                           {nxv4s16, p0, nxv4s16, 16},
                                           {nxv8s16, p0, nxv8s16, 16},
                                           {nxv16s16, p0, nxv16s16, 16},
                                           {nxv32s16, p0, nxv32s16, 16},
                                           {nxv2s32, p0, nxv2s32, 32},
                                           {nxv4s32, p0, nxv4s32, 32},
                                           {nxv8s32, p0, nxv8s32, 32},
                                           {nxv16s32, p0, nxv16s32, 32}});

    if (ST.getELen() == 64) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                            {nxv1s16, p0, nxv1s16, 16},
                                            {nxv1s32, p0, nxv1s32, 32}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
                                             {nxv1s16, p0, nxv1s16, 16},
                                             {nxv1s32, p0, nxv1s32, 32}});

    if (ST.hasVInstructionsI64()) {
      LoadActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                            {nxv2s64, p0, nxv2s64, 64},
                                            {nxv4s64, p0, nxv4s64, 64},
                                            {nxv8s64, p0, nxv8s64, 64}});
      StoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
                                             {nxv2s64, p0, nxv2s64, 64},
                                             {nxv4s64, p0, nxv4s64, 64},
                                             {nxv8s64, p0, nxv8s64, 64}});

  if (XLen <= ST.getELen()) {

  LoadActions.widenScalarToNextPow2(0, 8)
      .lowerIfMemSizeNotByteSizePow2()
      .clampScalar(0, s16, sXLen)

      .clampScalar(0, s16, sXLen)
      .lowerIfMemSizeNotByteSizePow2()

  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, sXLen, sXLen).lower();
  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).customFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
  if (ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH})

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});

    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .widenScalarIf(typeIs(0, sXLen),
  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_UREM})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
    getActionDefinitionsBuilder(G_SREM)
        .libcallFor({sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  getActionDefinitionsBuilder({G_SDIVREM, G_UDIVREM}).lower();

  getActionDefinitionsBuilder(G_ABS)
      .customFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)

  getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN})
      .legalFor(ST.hasStdExtZbb(), {sXLen})
      .minScalar(ST.hasStdExtZbb(), 0, sXLen)

  getActionDefinitionsBuilder({G_SCMP, G_UCMP}).lower();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder({G_DYN_STACKALLOC, G_STACKSAVE, G_STACKRESTORE})
  getActionDefinitionsBuilder(
      {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FSQRT, G_FMAXNUM, G_FMINNUM})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});

  getActionDefinitionsBuilder({G_FNEG, G_FABS})
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});

  getActionDefinitionsBuilder(G_FREM)
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128})

  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalFor(ST.hasStdExtF(), {{s32, s32}})
      .legalFor(ST.hasStdExtD(), {{s64, s64}, {s32, s64}, {s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s16}, {s16, s32}, {s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}, {s64, s16}})

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor(ST.hasStdExtD(), {{s32, s64}})
      .legalFor(ST.hasStdExtZfh(), {{s16, s32}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s16, s64}})
      .libcallFor({{s32, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}});
  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor(ST.hasStdExtD(), {{s64, s32}})
      .legalFor(ST.hasStdExtZfh(), {{s32, s16}})
      .legalFor(ST.hasStdExtZfh() && ST.hasStdExtD(), {{s64, s16}})
      .libcallFor({{s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}});

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .clampScalar(0, sXLen, sXLen)
      .libcallFor({{sXLen, s32}, {sXLen, s64}})
      .libcallFor(ST.is64Bit(), {{sXLen, s128}});
  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customFor(ST.hasStdExtF(), {{s1, s32}})
      .customFor(ST.hasStdExtD(), {{s1, s64}})
      .customFor(ST.hasStdExtZfh(), {{s1, s16}})
      .lowerFor({{s1, s32}, {s1, s64}});

  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor(ST.hasStdExtF(), {s32})
      .legalFor(ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfh(), {s16})
      .lowerFor({s32, s64, s128});
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalFor(ST.hasStdExtF(), {{sXLen, s32}})
      .legalFor(ST.hasStdExtD(), {{sXLen, s64}})
      .legalFor(ST.hasStdExtZfh(), {{sXLen, s16}})
      .customFor(ST.is64Bit() && ST.hasStdExtF(), {{s32, s32}})
      .customFor(ST.is64Bit() && ST.hasStdExtD(), {{s32, s64}})
      .customFor(ST.is64Bit() && ST.hasStdExtZfh(), {{s32, s16}})
      .widenScalarToNextPow2(0)
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}, {s128, s128}});
  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalFor(ST.hasStdExtF(), {{s32, sXLen}})
      .legalFor(ST.hasStdExtD(), {{s64, sXLen}})
      .legalFor(ST.hasStdExtZfh(), {{s16, sXLen}})
      .widenScalarToNextPow2(1)
            return Query.Types[0].isScalar() && Query.Types[1].isScalar() &&
                   (Query.Types[1].getSizeInBits() < ST.getXLen()) &&
                   ((ST.hasStdExtF() && Query.Types[0].getSizeInBits() == 32) ||
                    (ST.hasStdExtD() && Query.Types[0].getSizeInBits() == 64) ||
                    (ST.hasStdExtZfh() &&
                     Query.Types[0].getSizeInBits() == 16));
      .libcallFor({{s32, s32}, {s64, s32}, {s32, s64}, {s64, s64}})
      .libcallFor(ST.is64Bit(), {{s128, s32}, {s128, s64}})
      .libcallFor(ST.is64Bit(), {{s32, s128}, {s64, s128}, {s128, s128}});
  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR, G_FRINT, G_FNEARBYINT,
                               G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND,
                               G_INTRINSIC_ROUNDEVEN})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16})
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});

  getActionDefinitionsBuilder({G_FMAXIMUM, G_FMINIMUM})
      .legalFor(ST.hasStdExtZfa(), {s32})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtD(), {s64})
      .legalFor(ST.hasStdExtZfa() && ST.hasStdExtZfh(), {s16});

  getActionDefinitionsBuilder({G_FCOS, G_FSIN, G_FTAN, G_FPOW, G_FLOG, G_FLOG2,
                               G_FLOG10, G_FEXP, G_FEXP2, G_FEXP10, G_FACOS,
                               G_FASIN, G_FATAN, G_FATAN2, G_FCOSH, G_FSINH,
      .libcallFor({s32, s64})
      .libcallFor(ST.is64Bit(), {s128});
  getActionDefinitionsBuilder({G_FPOWI, G_FLDEXP})
      .libcallFor({{s32, s32}, {s64, s32}})
      .libcallFor(ST.is64Bit(), {{s128, s32}});
  getActionDefinitionsBuilder(G_VASTART).customFor({p0});

  getActionDefinitionsBuilder(G_VAARG)
      .clampScalar(0, sXLen, sXLen)
      .lowerForCartesianProduct({sXLen, p0}, {p0});

  getActionDefinitionsBuilder(G_VSCALE)
      .clampScalar(0, sXLen, sXLen)

  getActionDefinitionsBuilder(G_SPLAT_VECTOR)

  if (ST.hasVInstructionsF64() && ST.hasStdExtD())
    SplatActions.legalIf(all(
  else if (ST.hasVInstructionsI64())
    SplatActions.customIf(all(

  SplatActions.clampScalar(1, sXLen, sXLen);
  getActionDefinitionsBuilder(G_EXTRACT_SUBVECTOR)
            Query.Types[0].getElementCount().divideCoefficientBy(8), 8);
        return std::pair(0, CastTy);
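  // The mutation fragment above appears to handle mask (s1-element) vectors by
  // bitcasting them to an s8-element vector with one eighth the element count
  // before the subvector extract, since sub-byte elements cannot be indexed
  // directly.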
  getActionDefinitionsBuilder(G_INSERT_SUBVECTOR)

  getLegacyLegalizerInfo().computeTables();
  switch (IntrinsicID) {
  case Intrinsic::vacopy: {
    LLT PtrTy = MRI.getType(DstLst);
    auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
    MIRBuilder.buildStore(Tmp, DstLst, *StoreMMO);
    MI.eraseFromParent();
  assert(MI.getOpcode() == TargetOpcode::G_VASTART);
  MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
                        *MI.memoperands()[0]);
  MI.eraseFromParent();
  auto &MF = *MI.getParent()->getParent();
  LLT PtrTy = MRI.getType(PtrReg);
  Register IndexReg = MI.getOperand(2).getReg();
  LLT IndexTy = MRI.getType(IndexReg);
    IndexReg = MIRBuilder.buildShl(IndexTy, IndexReg, ShiftAmt).getReg(0);
      STI.is64Bit() ? TargetOpcode::G_SEXTLOAD : TargetOpcode::G_LOAD;
  MI.eraseFromParent();
bool RISCVLegalizerInfo::shouldBeInConstantPool(const APInt &APImm,
                                                bool ShouldOptForSize) const {
  if (ShouldOptForSize)
  unsigned ShiftAmt, AddOpc;
  uint64_t Val = MI.getOperand(1).getCImm()->getZExtValue();
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
  } else if (Log2 > 3) {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
      MIB.buildInstr(RISCV::G_READ_VLENB, {Dst}, {});
  } else if ((Val % 8) == 0) {
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
    auto VLENB = MIB.buildInstr(RISCV::G_READ_VLENB, {XLenTy}, {});
  MI.eraseFromParent();
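  // legalizeVScale materializes G_VSCALE Val from VLENB (read via
  // RISCV::G_READ_VLENB). With RVVBitsPerBlock == 64, vscale == VLENB / 8, so
  // the result is (VLENB * Val) / 8: a single shift of VLENB when Val is a
  // power of two (right for Log2 < 3, left for Log2 > 3, VLENB itself when
  // Log2 == 3), VLENB * (Val / 8) when Val is a multiple of 8, and
  // (VLENB >> 3) * Val otherwise.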
  unsigned Opc = MI.getOpcode();
  assert(Opc == TargetOpcode::G_ZEXT || Opc == TargetOpcode::G_SEXT ||
         Opc == TargetOpcode::G_ANYEXT);
  LLT DstTy = MRI.getType(Dst);
  int64_t ExtTrueVal = Opc == TargetOpcode::G_SEXT ? -1 : 1;
  MI.eraseFromParent();
913 "Machine instructions must be Load/Store.");
920 LLT DataTy =
MRI.getType(DstReg);
924 if (!
MI.hasOneMemOperand())
932 if (TLI->allowsMemoryAccessForAlignment(Ctx,
DL, VT, *MMO))
936 assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
937 "Unexpected unaligned RVV load type");
940 unsigned NumElements =
  return MIB.buildInstr(RISCV::G_VMSET_VL, {MaskTy}, {VL});

static std::pair<MachineInstrBuilder, MachineInstrBuilder>

  return MIB.buildInstr(RISCV::G_SPLAT_VECTOR_SPLIT_I64_VL, {Dst},
                        {Passthru, Lo, Hi, VL});

                                   Unmerge.getReg(1), VL, MIB, MRI);
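// buildSplatPartsS64WithVL emits G_SPLAT_VECTOR_SPLIT_I64_VL
// {Passthru, Lo, Hi, VL}; buildSplatSplitS64WithVL unmerges an s64 scalar into
// its two s32 halves and forwards them, which is how a 64-bit splat value is
// formed when it does not fit in a single XLEN-wide register.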
  assert(MI.getOpcode() == TargetOpcode::G_SPLAT_VECTOR);
  Register SplatVal = MI.getOperand(1).getReg();
  LLT VecTy = MRI.getType(Dst);
  if (XLenTy.getSizeInBits() == 32 &&
    MI.eraseFromParent();
    MIB.buildInstr(RISCV::G_VMSET_VL, {Dst}, {VL});
    MI.eraseFromParent();
    MIB.buildInstr(RISCV::G_VMCLR_VL, {Dst}, {VL});
    MI.eraseFromParent();
  auto ZExtSplatVal = MIB.buildZExt(InterEltTy, SplatVal);
  MI.eraseFromParent();
         "Unexpected vector LLT");
bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
  LLT LitTy = MRI.getType(Dst);
  LLT BigTy = MRI.getType(Src);
  auto BigZExt = MIB.buildZExt(ExtBigTy, Src);
    MI.eraseFromParent();
  unsigned RemIdx = Decompose.second;
  LLT InterLitTy = BigTy;
  assert(Decompose.first != RISCV::NoSubRegister);
  auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
      RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
      {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
  MI.eraseFromParent();
bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
  LLT BigTy = MRI.getType(BigVec);
  LLT LitTy = MRI.getType(LitVec);
      MRI.getVRegDef(BigVec)->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
  if (BigTyMinElts >= 8 && LitTyMinElts >= 8)
  unsigned SubRegIdx, RemIdx;
  std::tie(SubRegIdx, RemIdx) =
  bool ExactlyVecRegSized =
  if (RemIdx == 0 && ExactlyVecRegSized)
  LLT InterLitTy = BigTy;
  unsigned AlignedIdx = Idx - RemIdx;
  bool NeedInsertSubvec =
      NeedInsertSubvec ? MRI.createGenericVirtualRegister(InterLitTy) : Dst;
                     {AlignedExtract, Insert, VL});
  auto SlideupAmt = MIB.buildVScale(XLenTy, RemIdx);
    VL = MIB.buildAdd(XLenTy, SlideupAmt, VL);
  MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
                 {AlignedExtract, Insert, SlideupAmt, Mask, VL, Policy});
  if (NeedInsertSubvec)
  MI.eraseFromParent();
  case TargetOpcode::G_ASHR:
    return RISCV::G_SRAW;
  case TargetOpcode::G_LSHR:
    return RISCV::G_SRLW;
  case TargetOpcode::G_SHL:
    return RISCV::G_SLLW;
  case TargetOpcode::G_SDIV:
    return RISCV::G_DIVW;
  case TargetOpcode::G_UDIV:
    return RISCV::G_DIVUW;
  case TargetOpcode::G_UREM:
    return RISCV::G_REMUW;
  case TargetOpcode::G_ROTL:
    return RISCV::G_ROLW;
  case TargetOpcode::G_ROTR:
    return RISCV::G_RORW;
  case TargetOpcode::G_CTLZ:
    return RISCV::G_CLZW;
  case TargetOpcode::G_CTTZ:
    return RISCV::G_CTZW;
  case TargetOpcode::G_FPTOSI:
    return RISCV::G_FCVT_W_RV64;
  case TargetOpcode::G_FPTOUI:
    return RISCV::G_FCVT_WU_RV64;
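// getRISCVWOpcode maps a generic opcode to the RV64-only *W generic opcode,
// which operates on the low 32 bits of its inputs and produces a sign-extended
// 32-bit result; legalizeCustom below uses it to widen s32 operations on RV64.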
  switch (MI.getOpcode()) {
  case TargetOpcode::G_ABS:
  case TargetOpcode::G_CONSTANT: {
    bool ShouldOptForSize = F.hasOptSize() || F.hasMinSize();
    if (!shouldBeInConstantPool(ConstVal->getValue(), ShouldOptForSize))
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_ADD: {
    Register DstALU = MRI.createGenericVirtualRegister(sXLen);
    MIRBuilder.buildInstr(TargetOpcode::G_TRUNC, {MO}, {DstSext});
  case TargetOpcode::G_SEXT_INREG: {
    LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
    int64_t SizeInBits = MI.getOperand(2).getImm();
    if (STI.hasStdExtZbb() && (SizeInBits == 8 || SizeInBits == 16))
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_SHL: {
    unsigned ExtOpc = TargetOpcode::G_ANYEXT;
    if (MI.getOpcode() == TargetOpcode::G_ASHR)
      ExtOpc = TargetOpcode::G_SEXT;
    else if (MI.getOpcode() == TargetOpcode::G_LSHR)
      ExtOpc = TargetOpcode::G_ZEXT;
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_ROTL:
  case TargetOpcode::G_ROTR: {
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTTZ: {
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    MI.eraseFromParent();
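  // For G_IS_FPCLASS the fpclass test kinds are translated into the
  // corresponding FCLASS result bits, RISCV::G_FCLASS is emitted on the source,
  // the result is ANDed with that mask, and the i1 destination is presumably
  // set by comparing the AND result against zero.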
  case TargetOpcode::G_BRJT:
    return legalizeBRJT(MI, MIRBuilder);
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  case TargetOpcode::G_VSCALE:
    return legalizeVScale(MI, MIRBuilder);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ANYEXT:
    return legalizeExt(MI, MIRBuilder);
  case TargetOpcode::G_SPLAT_VECTOR:
    return legalizeSplatVector(MI, MIRBuilder);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return legalizeExtractSubvector(MI, MIRBuilder);
  case TargetOpcode::G_INSERT_SUBVECTOR:
    return legalizeInsertSubvector(MI, Helper, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, Helper, MIRBuilder);