#include "llvm/IR/IntrinsicsAArch64.h"
#include <initializer_list>

#define DEBUG_TYPE "aarch64-legalinfo"

using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;
using namespace MIPatternMatch;

  using namespace TargetOpcode;
  std::initializer_list<LLT> PackedVectorAllTypeList = {

  std::initializer_list<LLT> ScalarAndPtrTypesList = {s8, s16, s32, s64, p0};

  const TargetMachine &TM = ST.getTargetLowering()->getTargetMachine();

  if (!ST.hasNEON() || !ST.hasFPARMv8()) {

  const bool HasFP16 = ST.hasFullFP16();
  const LLT &MinFPScalar = HasFP16 ? s16 : s32;
  const bool HasCSSC = ST.hasCSSC();
  const bool HasRCPC3 = ST.hasRCPC3();
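  // MinFPScalar is the narrowest FP scalar treated as legal: s16 with
  // +fullfp16, otherwise s32. HasCSSC and HasRCPC3 gate the scalar
  // G_CTPOP/G_ABS rules and the 128-bit atomic load/store rules further
  // below.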
                               {G_IMPLICIT_DEF, G_FREEZE, G_CONSTANT_FOLD_BARRIER})
      .legalFor({p0, s8, s16, s32, s64})
      .legalFor(PackedVectorAllTypeList)

      .legalFor(PackedVectorAllTypeList)

      .legalFor({s32, s64, v4s16, v8s16, v2s32, v4s32, v2s64})
      .clampScalar(0, s32, s64)
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);
      .legalFor({s32, s64, v2s32, v2s64, v4s32, v4s16, v8s16, v16s8, v8s8})
      .widenScalarToNextPow2(0)

        return Query.Types[0].getNumElements() <= 2;

        return Query.Types[0].getNumElements() <= 4;

        return Query.Types[0].getNumElements() <= 16;

        const auto &SrcTy = Query.Types[0];
        const auto &AmtTy = Query.Types[1];
        return !SrcTy.isVector() && SrcTy.getSizeInBits() == 32 &&
               AmtTy.getSizeInBits() == 32;
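      // A 32-bit scalar shifted by a 32-bit amount takes the custom path;
      // legalizeShlAshrLshr() below widens a constant shift amount to 64 bits.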
      .widenScalarToNextPow2(0)

      .legalFor({{p0, s64}, {v2p0, v2s64}})
      .clampScalarOrElt(1, s64, s64)

      .legalFor({s32, s64})
      .clampScalar(0, s32, s64)

      .lowerFor({s8, s16, s32, s64, v2s64, v4s32, v2s32})
      .clampScalarOrElt(0, s32, s64)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);

      .widenScalarToNextPow2(0, 32)
      .legalFor({s64, v8s16, v16s8, v4s32})

                              {G_SMIN, G_SMAX, G_UMIN, G_UMAX});
      .legalFor({s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32})

      .legalFor({v8s8, v16s8, v4s16, v8s16, v2s32, v4s32});

      {G_SADDE, G_SSUBE, G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_UADDO, G_USUBO})
      .legalFor({{s32, s32}, {s64, s32}})
      .clampScalar(0, s32, s64)
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM,
                               G_FMAXIMUM, G_FMINIMUM, G_FCEIL, G_FFLOOR,
                               G_FRINT, G_FNEARBYINT, G_INTRINSIC_TRUNC,
                               G_INTRINSIC_ROUND, G_INTRINSIC_ROUNDEVEN})
      .legalFor({MinFPScalar, s32, s64, v2s32, v4s32, v2s64})

        const auto &Ty = Query.Types[0];
        return (Ty == v8s16 || Ty == v4s16) && HasFP16;

      .minScalarOrElt(0, MinFPScalar)

      .legalFor({{s64, MinFPScalar}, {s64, s32}, {s64, s64}})
      .libcallFor({{s64, s128}})
      .minScalarOrElt(1, MinFPScalar);

      {G_FCOS, G_FSIN, G_FPOW, G_FLOG, G_FLOG2, G_FLOG10, G_FTAN, G_FEXP,
       G_FEXP2, G_FEXP10, G_FACOS, G_FASIN, G_FATAN, G_FCOSH, G_FSINH, G_FTANH})
      .libcallFor({s32, s64});
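  // There are no AArch64 instructions for these transcendental operations,
  // so f32 and f64 values are lowered to the corresponding libm calls.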
  for (unsigned Op : {G_SEXTLOAD, G_ZEXTLOAD}) {
    if (Op == G_SEXTLOAD)

        .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                   {v2s32, p0, s64, 8}})
        .widenScalarToNextPow2(0)
        .clampScalar(0, s32, s64)
        .unsupportedIfMemSizeNotPow2()
  LoadActions.legalForTypesWithMemDesc({
      {nxv16s8, p0, nxv16s8, 8},
      {nxv8s16, p0, nxv8s16, 8},
      {nxv4s32, p0, nxv4s32, 8},
      {nxv2s64, p0, nxv2s64, 8},

  StoreActions.legalForTypesWithMemDesc({
      {nxv16s8, p0, nxv16s8, 8},
      {nxv8s16, p0, nxv8s16, 8},
      {nxv4s32, p0, nxv4s32, 8},
      {nxv2s64, p0, nxv2s64, 8},
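  // Each mem-desc entry reads {value type, pointer type, memory type,
  // minimum alignment in bits}; the nxv* entries are scalable vectors used
  // for SVE loads and stores.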
        return HasRCPC3 && Query.Types[0] == s128 &&

        return Query.Types[0] == s128 &&

      .legalForTypesWithMemDesc({{s8, p0, s8, 8},
                                 {v16s8, p0, s128, 8},
                                 {v8s16, p0, s128, 8},
                                 {v4s32, p0, s128, 8},
                                 {v2s64, p0, s128, 8}})
      .legalForTypesWithMemDesc(
          {{s32, p0, s8, 8}, {s32, p0, s16, 8}, {s64, p0, s32, 8}})
      .widenScalarToNextPow2(0, 8)
      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampMaxNumElements(0, s32, 4)
      .clampMaxNumElements(0, s64, 2)
      .clampMaxNumElements(0, p0, 2)
      .lowerIfMemSizeNotByteSizePow2()
      .clampScalar(0, s8, s64)

        return Query.Types[0].isScalar() &&
               Query.Types[0].getSizeInBits() > 32;

      .customIf(IsPtrVecPred)
      .scalarizeIf(typeInSet(0, {v2s16, v2s8}), 0);
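  // Entries whose memory type is narrower than the result (e.g.
  // {s32, p0, s8}) are extending loads. Pointer-vector loads take the
  // custom path via IsPtrVecPred, and v2s8/v2s16 loads are scalarized.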
        return HasRCPC3 && Query.Types[0] == s128 &&

        return Query.Types[0] == s128 &&

      .legalForTypesWithMemDesc(
          {{s8, p0, s8, 8}, {s16, p0, s8, 8},
           {s16, p0, s16, 8}, {s32, p0, s16, 8},
           {s32, p0, s8, 8}, {s32, p0, s16, 8}, {s32, p0, s32, 8},
           {s64, p0, s64, 8}, {s64, p0, s32, 8},
           {p0, p0, s64, 8}, {s128, p0, s128, 8}, {v16s8, p0, s128, 8},
           {v8s8, p0, s64, 8}, {v4s16, p0, s64, 8}, {v8s16, p0, s128, 8},
           {v2s32, p0, s64, 8}, {v4s32, p0, s128, 8}, {v2s64, p0, s128, 8}})
      .clampScalar(0, s8, s64)

        return Query.Types[0].isScalar() &&

      .clampMaxNumElements(0, s8, 16)
      .clampMaxNumElements(0, s16, 8)
      .clampMaxNumElements(0, s32, 4)
      .clampMaxNumElements(0, s64, 2)
      .clampMaxNumElements(0, p0, 2)
      .lowerIfMemSizeNotPow2()

      .customIf(IsPtrVecPred)
      .scalarizeIf(typeInSet(0, {v2s16, v2s8}), 0);
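  // Stores whose memory type is narrower than the value type (e.g.
  // {s16, p0, s8}) are truncating stores; as with loads, pointer-vector
  // stores go through the custom path and v2s8/v2s16 are scalarized.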
       {p0, v16s8, v16s8, 8},
       {p0, v4s16, v4s16, 8},
       {p0, v8s16, v8s16, 8},
       {p0, v2s32, v2s32, 8},
       {p0, v4s32, v4s32, 8},
       {p0, v2s64, v2s64, 8},

  auto IndexedLoadBasicPred = [=](const LegalityQuery &Query) {

      return MemTy == s8 || MemTy == s16;

      return MemTy == s8 || MemTy == s16 || MemTy == s32;
      .widenScalarToNextPow2(0)

        const auto &Ty = Query.Types[0];
        if (HasFP16 && Ty == s16)
          return true;
        return Ty == s32 || Ty == s64 || Ty == s128;

      .clampScalar(0, MinFPScalar, s128);
      .legalFor({{s32, s32}, {s32, s64}, {s32, p0}})
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s32)
      .minScalarEltSameAsIf(

      .clampNumElements(1, v8s8, v16s8)
      .clampNumElements(1, v4s16, v8s16)
      .clampNumElements(1, v2s32, v4s32)
      .clampNumElements(1, v2s64, v2s64)

        const auto &Ty = Query.Types[1];
        return (Ty == v8s16 || Ty == v4s16) && Ty == Query.Types[0] && HasFP16;

      .clampScalar(0, s32, s32)
      .minScalarOrElt(1, MinFPScalar)
      .minScalarEltSameAsIf(

      .clampNumElements(1, v4s16, v8s16)
      .clampNumElements(1, v2s32, v4s32)
      .clampMaxNumElements(1, s64, 2)
      .moreElementsToNextPow2(1)
      .libcallFor({{s32, s128}});
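  // Comparison results are s32 scalars (or vectors whose elements match the
  // operand width); fp128 compares are lowered to a libcall.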
    unsigned DstSize = Query.Types[0].getSizeInBits();

    if (Query.Types[0].isVector())

    if (DstSize < 8 || DstSize >= 128 || !isPowerOf2_32(DstSize))

      .legalIf(ExtLegalFunc)
      .legalFor({{v2s64, v2s32}, {v4s32, v4s16}, {v8s16, v8s8}})
      .clampScalar(0, s64, s64)

        return (Query.Types[0].getScalarSizeInBits() >
                Query.Types[1].getScalarSizeInBits() * 2) &&
               Query.Types[0].isVector() &&
               (Query.Types[1].getScalarSizeInBits() == 8 ||
                Query.Types[1].getScalarSizeInBits() == 16);

      .clampMinNumElements(1, s8, 8)

      .legalFor({{v2s32, v2s64}, {v4s16, v4s32}, {v8s8, v8s16}})
      .clampMaxNumElements(0, s8, 8)
      .clampMaxNumElements(0, s16, 4)
      .clampMaxNumElements(0, s32, 2)

      .clampMinNumElements(0, s8, 8)
      .clampMinNumElements(0, s16, 4)
      .legalFor(PackedVectorAllTypeList)

          {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}})
      .libcallFor({{s16, s128}, {s32, s128}, {s64, s128}})
      .clampNumElements(0, v4s16, v4s16)

          {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}})
      .libcallFor({{s128, s64}, {s128, s32}, {s128, s16}})
      .clampNumElements(0, v4s32, v4s32)
      .legalFor({{s32, s32},

               (Query.Types[1] == s16 || Query.Types[1] == v4s16 ||
                Query.Types[1] == v8s16) &&
               (Query.Types[0] == s32 || Query.Types[0] == s64 ||
                Query.Types[0] == v4s16 || Query.Types[0] == v8s16);

        return Query.Types[1] == s16 && Query.Types[0].getSizeInBits() > 64;

      .widenScalarOrEltToNextPow2OrMinSize(0)
      .widenScalarOrEltToNextPow2OrMinSize(1, HasFP16 ? 16 : 32)

        return Query.Types[0].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() >
                   Query.Types[1].getScalarSizeInBits();

        return Query.Types[1].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() <
                   Query.Types[1].getScalarSizeInBits();

      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampMaxNumElements(0, s64, 2)

          {{s32, s128}, {s64, s128}, {s128, s128}, {s128, s32}, {s128, s64}});
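      // Conversions that involve fp128 on either side are lowered to
      // libcalls; the size predicates above widen or split the remaining
      // cases so both sides end up at a supported width.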
      .legalFor({{s32, s32},

               (Query.Types[0] == s16 || Query.Types[0] == v4s16 ||
                Query.Types[0] == v8s16) &&
               (Query.Types[1] == s32 || Query.Types[1] == s64 ||
                Query.Types[1] == v4s16 || Query.Types[1] == v8s16);

      .widenScalarOrEltToNextPow2OrMinSize(1)
      .widenScalarOrEltToNextPow2OrMinSize(0, HasFP16 ? 16 : 32)

        return Query.Types[1].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() <
                   Query.Types[1].getScalarSizeInBits();

        return Query.Types[0].getScalarSizeInBits() <= 64 &&
               Query.Types[0].getScalarSizeInBits() >
                   Query.Types[1].getScalarSizeInBits();

      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampMaxNumElements(0, s64, 2)
      .libcallFor({{s16, s128},

      .clampScalar(0, s32, s32);
      .legalFor({{s32, s32}, {s64, s32}, {p0, s32}})
      .widenScalarToNextPow2(0)

      .legalFor({{s64, p0}, {v2s64, v2p0}})
      .widenScalarToNextPow2(0, 64)

        return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();

      .legalFor({{p0, s64}, {v2p0, v2s64}});
      .legalForCartesianProduct({s64, v8s8, v4s16, v2s32})
      .legalForCartesianProduct({s128, v16s8, v8s16, v4s32, v2s64, v2p0})

        return Query.Types[0].isVector() != Query.Types[1].isVector();

      .clampNumElements(0, v8s8, v16s8)
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
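      // Bitcasts within the 64-bit group and within the 128-bit group above
      // are legal as a Cartesian product; the predicate singles out casts
      // where only one side is a vector.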
      .clampScalar(0, s8, s64)

    return ST.outlineAtomics() && !ST.hasLSE();

        return Query.Types[0].getSizeInBits() == 128 &&
               !UseOutlineAtomics(Query);

                               G_ATOMICRMW_SUB, G_ATOMICRMW_AND, G_ATOMICRMW_OR,

      {G_ATOMICRMW_MIN, G_ATOMICRMW_MAX, G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX})
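  // With +outline-atomics and no LSE, atomic RMW and cmpxchg operations are
  // turned into calls to the outline-atomics helper library; a 128-bit
  // cmpxchg that is not outlined is custom-lowered by
  // legalizeAtomicCmpxchg128() below.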
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
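    // For G_MERGE_VALUES the wide type is the destination (index 0); for
    // G_UNMERGE_VALUES it is the source (index 1), so one set of rules can
    // be written in terms of BigTyIdx/LitTyIdx for both opcodes.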
          switch (Q.Types[BigTyIdx].getSizeInBits()) {

          switch (Q.Types[LitTyIdx].getSizeInBits()) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;

        return VecTy == v2s16 || VecTy == v4s16 || VecTy == v8s16 ||
               VecTy == v4s32 || VecTy == v2s64 || VecTy == v2s32 ||
               VecTy == v8s8 || VecTy == v16s8 || VecTy == v2p0;

        return Query.Types[1].getNumElements() <= 2;

        return Query.Types[1].getNumElements() <= 4;

        return Query.Types[1].getNumElements() <= 8;

        return Query.Types[1].getNumElements() <= 16;

      .minScalarOrElt(0, s8)

          typeInSet(0, {v16s8, v8s8, v8s16, v4s16, v4s32, v2s32, v2s64, v2p0}))

      .clampNumElements(0, v4s32, v4s32)
          {s32, s64, v8s8, v16s8, v4s16, v8s16, v2s32, v4s32}
      .widenScalarToNextPow2(1, 32)
      .clampScalar(1, s32, s64)
      .scalarSameSizeAs(0, 1);

      .widenScalarToNextPow2(0, 32)

        return (HasCSSC && typeInSet(0, {s32, s64})(Query));

        return (!HasCSSC && typeInSet(0, {s32, s64})(Query));
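      // With CSSC the scalar s32/s64 population counts are legal directly;
      // without it they take the custom path, where legalizeCTPOP() below
      // builds the NEON byte-count-and-sum sequence.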
            {v2s64, v2p0, v2s32, v4s32, v4s16, v16s8, v8s8, v8s16}, DstTy);

        return !Query.Types[1].isVector();

        return Query.Types[0].isVector() && Query.Types[1].isVector() &&
               Query.Types[0].getNumElements() >
                   Query.Types[1].getNumElements();

        return Query.Types[0].isVector() && Query.Types[1].isVector() &&
               Query.Types[0].getNumElements() <
                   Query.Types[1].getNumElements();

      .widenScalarOrEltToNextPow2OrMinSize(0, 8)
      .clampNumElements(0, v8s8, v16s8)
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64);
      .legalFor({{v4s32, v2s32}, {v8s16, v4s16}, {v16s8, v8s8}})

        return Query.Types[0].getSizeInBits() <= 128 &&
               Query.Types[1].getSizeInBits() <= 64;

      .customForCartesianProduct({p0}, {s8}, {s64})

      .legalForCartesianProduct({p0}, {p0}, {s64})

      .legalFor({s32, s64});
  ABSActions.legalFor(PackedVectorAllTypeList)

          [=](const LegalityQuery &Query) { return std::make_pair(0, v4s16); })

          [=](const LegalityQuery &Query) { return std::make_pair(0, v2s32); })
      .clampNumElements(0, v8s8, v16s8)
      .clampNumElements(0, v4s16, v8s16)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0)
      .legalFor({{s32, v2s32}, {s32, v4s32}, {s64, v2s64}})

        const auto &Ty = Query.Types[1];
        return (Ty == v4s16 || Ty == v8s16) && HasFP16;

      .minScalarOrElt(0, MinFPScalar)

      .clampMaxNumElements(1, s64, 2)

                               G_VECREDUCE_FMINIMUM, G_VECREDUCE_FMAXIMUM})
      .legalFor({{s32, v4s32}, {s32, v2s32}, {s64, v2s64}})

        const auto &Ty = Query.Types[1];
        return Query.Types[0] == s16 && (Ty == v8s16 || Ty == v4s16) && HasFP16;

      .minScalarOrElt(0, MinFPScalar)
      {G_VECREDUCE_SMIN, G_VECREDUCE_SMAX, G_VECREDUCE_UMIN, G_VECREDUCE_UMAX})
      .legalFor({{s8, v8s8},

        return Query.Types[1].isVector() &&
               Query.Types[1].getElementType() != s8 &&
               Query.Types[1].getNumElements() & 1;

      .clampMaxNumElements(1, s64, 2)

      {G_VECREDUCE_OR, G_VECREDUCE_AND, G_VECREDUCE_XOR})

        return std::make_pair(1, SrcTy.divide(2));
      .customFor({{s32, s32}, {s32, s64}, {s64, s64}})

      .legalFor({{s32, s64}, {s64, s64}})

        return Q.Types[0].isScalar() && Q.Types[1].getScalarSizeInBits() < 64;

      .customFor({{s32, s32}, {s64, s64}});

      .legalFor({{s32, s32},

      .customFor({{s128, s128},

      .legalFor({{v8s8, v8s8},

      .customFor({{s32, s32},

      .clampScalar(0, s32, s128)
      .widenScalarToNextPow2(0)
      .minScalarEltSameAsIf(always, 1, 0)
      .maxScalarEltSameAsIf(always, 1, 0);
      .legalFor({v2s64, v2s32, v4s32, v4s16, v8s16, v8s8, v16s8})
      .clampNumElements(0, v8s8, v16s8)

      .legalFor({{s64, s32}, {s64, s64}});

                               G_GET_FPMODE, G_SET_FPMODE, G_RESET_FPMODE})

  verify(*ST.getInstrInfo());
  switch (MI.getOpcode()) {

  case TargetOpcode::G_VAARG:
    return legalizeVaArg(MI, MRI, MIRBuilder);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
    return legalizeLoadStore(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_GLOBAL_VALUE:
    return legalizeSmallCMGlobalValue(MI, MRI, MIRBuilder, Observer);
  case TargetOpcode::G_SBFX:
  case TargetOpcode::G_UBFX:
    return legalizeBitfieldExtract(MI, MRI, Helper);
  case TargetOpcode::G_FSHL:
  case TargetOpcode::G_FSHR:
    return legalizeFunnelShift(MI, MRI, MIRBuilder, Observer, Helper);
  case TargetOpcode::G_ROTR:
    return legalizeRotate(MI, MRI, Helper);
  case TargetOpcode::G_CTPOP:
    return legalizeCTPOP(MI, MRI, Helper);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return legalizeAtomicCmpxchg128(MI, MRI, Helper);
  case TargetOpcode::G_CTTZ:
    return legalizeCTTZ(MI, Helper);
  case TargetOpcode::G_BZERO:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMSET:
    return legalizeMemOps(MI, Helper);
  case TargetOpcode::G_EXTRACT_VECTOR_ELT:
    return legalizeExtractVectorElt(MI, MRI, Helper);
  case TargetOpcode::G_DYN_STACKALLOC:
    return legalizeDynStackAlloc(MI, Helper);
  case TargetOpcode::G_PREFETCH:
    return legalizePrefetch(MI, Helper);
  case TargetOpcode::G_ABS:

  case TargetOpcode::G_ICMP:
    return legalizeICMP(MI, MRI, MIRBuilder);
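  // Every operation marked Custom in the rule sets above funnels through
  // this switch into one of the target-specific legalize* helpers below.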
  assert(MI.getOpcode() == TargetOpcode::G_FSHL ||
         MI.getOpcode() == TargetOpcode::G_FSHR);

  Register ShiftNo = MI.getOperand(3).getReg();
  LLT ShiftTy = MRI.getType(ShiftNo);

  LLT OperationTy = MRI.getType(MI.getOperand(0).getReg());

  if (!VRegAndVal || VRegAndVal->Value.urem(BitWidth) == 0)

  Amount = MI.getOpcode() == TargetOpcode::G_FSHL ? BitWidth - Amount : Amount;

  if (ShiftTy.getSizeInBits() == 64 && MI.getOpcode() == TargetOpcode::G_FSHR &&

  if (MI.getOpcode() == TargetOpcode::G_FSHR) {

    MI.getOperand(3).setReg(Cast64.getReg(0));

  else if (MI.getOpcode() == TargetOpcode::G_FSHL) {

        {MI.getOperand(1).getReg(), MI.getOperand(2).getReg(),

    MI.eraseFromParent();
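  // A funnel shift by a constant amount is canonicalised onto G_FSHR:
  // G_FSHL by n becomes G_FSHR by (BitWidth - n), which is the form the
  // AArch64 EXTR instruction implements.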
  Register SrcReg1 = MI.getOperand(2).getReg();
  Register SrcReg2 = MI.getOperand(3).getReg();
  LLT DstTy = MRI.getType(DstReg);
  LLT SrcTy = MRI.getType(SrcReg1);

  MIRBuilder.buildNot(DstReg, CmpReg);

  MI.eraseFromParent();

  LLT AmtTy = MRI.getType(AmtReg);

  MI.getOperand(2).setReg(NewAmt.getReg(0));
bool AArch64LegalizerInfo::legalizeSmallCMGlobalValue(

  assert(MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);

  auto &GlobalOp = MI.getOperand(1);

  if (GlobalOp.isSymbol())

  const auto *GV = GlobalOp.getGlobal();
  if (GV->isThreadLocal())

  auto Offset = GlobalOp.getOffset();

  MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);

           "Should not have folded in an offset for a tagged global!");

        .addGlobalAddress(GV, 0x100000000,

    MRI.setRegClass(ADRP.getReg(0), &AArch64::GPR64RegClass);

          .addGlobalAddress(GV, Offset,

  MI.eraseFromParent();
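  // In the small code model a G_GLOBAL_VALUE becomes an ADRP of the global's
  // 4K page plus a low-12-bit page-offset add; the ADRP result is given the
  // GPR64 register class explicitly because ADRP is a target instruction
  // rather than a generic one.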
  switch (IntrinsicID) {
  case Intrinsic::vacopy: {

    unsigned VaListSize =

                               VaListSize, Align(PtrSize)));

                                VaListSize, Align(PtrSize)));
    MI.eraseFromParent();

  case Intrinsic::get_dynamic_area_offset: {

    MI.eraseFromParent();

  case Intrinsic::aarch64_mops_memset_tag: {
    assert(MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS);

    auto &Value = MI.getOperand(3);

    Value.setReg(ExtValueReg);

  case Intrinsic::aarch64_prefetch: {

    auto &AddrVal = MI.getOperand(1);

    int64_t IsWrite = MI.getOperand(2).getImm();
    int64_t Target = MI.getOperand(3).getImm();
    int64_t IsStream = MI.getOperand(4).getImm();
    int64_t IsData = MI.getOperand(5).getImm();

    unsigned PrfOp = (IsWrite << 4) |

    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_uaddv:
  case Intrinsic::aarch64_neon_saddv:
  case Intrinsic::aarch64_neon_umaxv:
  case Intrinsic::aarch64_neon_smaxv:
  case Intrinsic::aarch64_neon_uminv:
  case Intrinsic::aarch64_neon_sminv: {

    bool IsSigned = IntrinsicID == Intrinsic::aarch64_neon_saddv ||
                    IntrinsicID == Intrinsic::aarch64_neon_smaxv ||
                    IntrinsicID == Intrinsic::aarch64_neon_sminv;

    auto OldDst = MI.getOperand(0).getReg();
    auto OldDstTy = MRI.getType(OldDst);
    LLT NewDstTy = MRI.getType(MI.getOperand(2).getReg()).getElementType();
    if (OldDstTy == NewDstTy)

    auto NewDst = MRI.createGenericVirtualRegister(NewDstTy);

    MI.getOperand(0).setReg(NewDst);

    MIB.buildExtOrTrunc(IsSigned ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT,

  case Intrinsic::aarch64_neon_uaddlp:
  case Intrinsic::aarch64_neon_saddlp: {

    unsigned Opc = IntrinsicID == Intrinsic::aarch64_neon_uaddlp
                       ? AArch64::G_UADDLP
                       : AArch64::G_SADDLP;

    MI.eraseFromParent();

  case Intrinsic::aarch64_neon_uaddlv:
  case Intrinsic::aarch64_neon_saddlv: {

    unsigned Opc = IntrinsicID == Intrinsic::aarch64_neon_uaddlv
                       ? AArch64::G_UADDLV
                       : AArch64::G_SADDLV;

    LLT DstTy = MRI.getType(DstReg);

    MI.eraseFromParent();
  case Intrinsic::aarch64_neon_smax:
  case Intrinsic::aarch64_neon_smin:
  case Intrinsic::aarch64_neon_umax:
  case Intrinsic::aarch64_neon_umin:
  case Intrinsic::aarch64_neon_fmax:
  case Intrinsic::aarch64_neon_fmin:
  case Intrinsic::aarch64_neon_fmaxnm:
  case Intrinsic::aarch64_neon_fminnm: {

    if (IntrinsicID == Intrinsic::aarch64_neon_smax)

    else if (IntrinsicID == Intrinsic::aarch64_neon_smin)

    else if (IntrinsicID == Intrinsic::aarch64_neon_umax)

    else if (IntrinsicID == Intrinsic::aarch64_neon_umin)

    else if (IntrinsicID == Intrinsic::aarch64_neon_fmax)
      MIB.buildInstr(TargetOpcode::G_FMAXIMUM, {MI.getOperand(0)},
                     {MI.getOperand(2), MI.getOperand(3)});
    else if (IntrinsicID == Intrinsic::aarch64_neon_fmin)
      MIB.buildInstr(TargetOpcode::G_FMINIMUM, {MI.getOperand(0)},
                     {MI.getOperand(2), MI.getOperand(3)});
    else if (IntrinsicID == Intrinsic::aarch64_neon_fmaxnm)
      MIB.buildInstr(TargetOpcode::G_FMAXNUM, {MI.getOperand(0)},
                     {MI.getOperand(2), MI.getOperand(3)});
    else if (IntrinsicID == Intrinsic::aarch64_neon_fminnm)
      MIB.buildInstr(TargetOpcode::G_FMINNUM, {MI.getOperand(0)},
                     {MI.getOperand(2), MI.getOperand(3)});
    MI.eraseFromParent();
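    // The NEON min/max intrinsics are rewritten as the equivalent generic
    // opcodes (G_SMAX/G_SMIN/G_UMAX/G_UMIN, G_FMAXIMUM/G_FMINIMUM,
    // G_FMAXNUM/G_FMINNUM) so later combines and selection handle them
    // uniformly.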
  case Intrinsic::vector_reverse:

bool AArch64LegalizerInfo::legalizeShlAshrLshr(

  assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
         MI.getOpcode() == TargetOpcode::G_LSHR ||
         MI.getOpcode() == TargetOpcode::G_SHL);

  MI.getOperand(2).setReg(ExtCst.getReg(0));

      isShiftedInt<7, 3>(NewOffset)) {
bool AArch64LegalizerInfo::legalizeLoadStore(

  assert(MI.getOpcode() == TargetOpcode::G_STORE ||
         MI.getOpcode() == TargetOpcode::G_LOAD);

  const LLT ValTy = MRI.getType(ValReg);

  bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;

      ST->hasLSE2() && ST->hasRCPC3() && (IsLoadAcquire || IsStoreRelease);

    Opcode = IsLoad ? AArch64::LDIAPPX : AArch64::STILPX;

    assert(ST->hasLSE2() && "ldp/stp not single copy atomic without +lse2");

    Opcode = IsLoad ? AArch64::LDPXi : AArch64::STPXi;

    NewI = MIRBuilder.buildInstr(Opcode, {s64, s64}, {});

        Opcode, {}, {Split->getOperand(0), Split->getOperand(1)});

  NewI.addUse(MI.getOperand(1).getReg());

      *MRI.getTargetRegisterInfo(),

  MI.eraseFromParent();
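  // A 128-bit atomic load/store is emitted as a pair of 64-bit halves:
  // LDIAPP/STILP when LSE2 + RCPC3 provide acquire/release forms, otherwise
  // LDP/STP, which is only single-copy atomic with +lse2.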
  LLVM_DEBUG(dbgs() << "Tried to do custom legalization on wrong load/store");

  auto &MMO = **MI.memoperands_begin();

  if (MI.getOpcode() == TargetOpcode::G_STORE) {

    auto NewLoad = MIRBuilder.buildLoad(NewTy, MI.getOperand(1), MMO);

  MI.eraseFromParent();
  Align Alignment(MI.getOperand(2).getImm());

  Register ListPtr = MI.getOperand(1).getReg();

  LLT PtrTy = MRI.getType(ListPtr);

  if (Alignment > PtrAlign) {

    auto ListTmp = MIRBuilder.buildPtrAdd(PtrTy, List, AlignMinus1.getReg(0));

  LLT ValTy = MRI.getType(Dst);

                               ValTy, std::max(Alignment, PtrAlign)));

  MI.eraseFromParent();
bool AArch64LegalizerInfo::legalizeBitfieldExtract(

  LLT Ty = MRI.getType(Val);

         "Expected src and dst to have the same type!");

  auto Add = MIRBuilder.buildAdd(s64, CTPOP1, CTPOP2);

  MI.eraseFromParent();

  if (!ST->hasNEON() ||
      MI.getMF()->getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) {

  assert((Size == 32 || Size == 64 || Size == 128) &&
         "Expected only 32, 64, or 128 bit scalars!");

    Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});

    Sum = MIRBuilder.buildInstr(AArch64::G_UDOT, {Dt}, {Zeros, Ones, CTPOP});

  MI.eraseFromParent();

    Opc = Intrinsic::aarch64_neon_uaddlv;

    Opc = Intrinsic::aarch64_neon_uaddlp;

    Opc = Intrinsic::aarch64_neon_uaddlp;

    Opc = Intrinsic::aarch64_neon_uaddlp;

    Opc = Intrinsic::aarch64_neon_uaddlp;

    Opc = Intrinsic::aarch64_neon_uaddlp;

  for (LLT HTy : HAddTys) {

  MI.eraseFromParent();
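  // Wide CTPOP is built from a NEON byte-wise CNT followed by widening
  // pairwise adds (uaddlp) or an across-vector add (uaddlv); with the
  // dot-product extension a UDOT against an all-ones vector sums the byte
  // counts in a single step.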
bool AArch64LegalizerInfo::legalizeAtomicCmpxchg128(

  auto Addr = MI.getOperand(1).getReg();
  auto DesiredI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(2));
  auto NewI = MIRBuilder.buildUnmerge({s64, s64}, MI.getOperand(3));
  auto DstLo = MRI.createGenericVirtualRegister(s64);
  auto DstHi = MRI.createGenericVirtualRegister(s64);

    auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();

      Opcode = AArch64::CASPAX;

      Opcode = AArch64::CASPLX;

      Opcode = AArch64::CASPALX;

      Opcode = AArch64::CASPX;

    auto CASDst = MRI.createGenericVirtualRegister(s128);
    auto CASDesired = MRI.createGenericVirtualRegister(s128);
    auto CASNew = MRI.createGenericVirtualRegister(s128);
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASDesired}, {})
        .addUse(DesiredI->getOperand(0).getReg())

        .addUse(DesiredI->getOperand(1).getReg())
        .addImm(AArch64::subo64);
    MIRBuilder.buildInstr(TargetOpcode::REG_SEQUENCE, {CASNew}, {})

        .addImm(AArch64::subo64);

    CAS = MIRBuilder.buildInstr(Opcode, {CASDst}, {CASDesired, CASNew, Addr});

    auto Ordering = (*MI.memoperands_begin())->getMergedOrdering();

      Opcode = AArch64::CMP_SWAP_128_ACQUIRE;

      Opcode = AArch64::CMP_SWAP_128_RELEASE;

      Opcode = AArch64::CMP_SWAP_128;

      Opcode = AArch64::CMP_SWAP_128_MONOTONIC;

    auto Scratch = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
    CAS = MIRBuilder.buildInstr(Opcode, {DstLo, DstHi, Scratch},
                                {Addr, DesiredI->getOperand(0),
                                 DesiredI->getOperand(1), NewI->getOperand(0),

      *MRI.getTargetRegisterInfo(),

  MI.eraseFromParent();
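  // With LSE the 128-bit cmpxchg becomes a CASP instruction whose operands
  // are REG_SEQUENCEs of the two 64-bit halves; without LSE it becomes a
  // CMP_SWAP_128* pseudo that is later expanded to an exclusive load/store
  // loop.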
  LLT Ty = MRI.getType(MI.getOperand(1).getReg());

  MIRBuilder.buildCTLZ(MI.getOperand(0).getReg(), BitReverse);
  MI.eraseFromParent();
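  // CTTZ is lowered as a bit reverse followed by CTLZ: counting the leading
  // zeros of the reversed value counts the trailing zeros of the original.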
  if (MI.getOpcode() == TargetOpcode::G_MEMSET) {

    auto &Value = MI.getOperand(1);

    Value.setReg(ExtValueReg);
bool AArch64LegalizerInfo::legalizeExtractVectorElt(

  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT);

bool AArch64LegalizerInfo::legalizeDynStackAlloc(

  Register AllocSize = MI.getOperand(1).getReg();

         "Unexpected type for dynamic alloca");

         "Unexpected type for dynamic alloca");

  LLT PtrTy = MRI.getType(Dst);

  auto NewMI =
      MIRBuilder.buildInstr(AArch64::PROBED_STACKALLOC_DYN, {}, {SPTmp});
  MRI.setRegClass(NewMI.getReg(0), &AArch64::GPR64commonRegClass);
  MIRBuilder.setInsertPt(*NewMI->getParent(), NewMI);

  MI.eraseFromParent();
  auto &AddrVal = MI.getOperand(0);

  int64_t IsWrite = MI.getOperand(1).getImm();
  int64_t Locality = MI.getOperand(2).getImm();
  int64_t IsData = MI.getOperand(3).getImm();

  bool IsStream = Locality == 0;
  if (Locality != 0) {
    assert(Locality <= 3 && "Prefetch locality out-of-range");

    Locality = 3 - Locality;

  unsigned PrfOp = (IsWrite << 4) | (!IsData << 3) | (Locality << 1) | IsStream;
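  // Worked example: a read prefetch of data with locality 2 encodes as
  // (0 << 4) | (0 << 3) | ((3 - 2) << 1) | 0 == 0b00010, i.e. the PLDL2KEEP
  // operand of PRFM.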
  MI.eraseFromParent();