35#include "llvm/IR/IntrinsicsWebAssembly.h"
42#define DEBUG_TYPE "wasm-lower"
47 auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;
61 Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
67 if (Subtarget->hasSIMD128()) {
75 if (Subtarget->hasFP16()) {
78 if (Subtarget->hasReferenceTypes()) {
81 if (Subtarget->hasExceptionHandling()) {
90 for (
auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64}) {
94 if (Subtarget->hasSIMD128()) {
95 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
101 if (Subtarget->hasFP16()) {
105 if (Subtarget->hasReferenceTypes()) {
108 for (
auto T : {MVT::externref, MVT::funcref, MVT::Other}) {
129 for (
auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64, MVT::v8f16}) {
130 if (!Subtarget->hasFP16() &&
T == MVT::v8f16) {
143 if (
MVT(
T).isVector())
157 if (
T != MVT::v8f16) {
161 if (Subtarget->hasFP16() &&
T == MVT::f32) {
175 for (
auto T : {MVT::i32, MVT::i64})
177 if (Subtarget->hasSIMD128())
178 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
182 if (Subtarget->hasWideArithmetic()) {
190 if (Subtarget->hasNontrappingFPToInt())
192 for (
auto T : {MVT::i32, MVT::i64})
195 if (Subtarget->hasRelaxedSIMD()) {
198 {MVT::v4f32, MVT::v2f64},
Custom);
201 if (Subtarget->hasSIMD128()) {
236 for (
auto T : {MVT::v16i8, MVT::v8i16})
240 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
244 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
248 if (Subtarget->hasFP16()) {
254 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
258 if (Subtarget->hasFP16())
262 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
270 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
275 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
285 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64})
290 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
300 for (
auto T : {MVT::v8i16, MVT::v4i32, MVT::v2i64})
306 for (
auto T : {MVT::v4f32, MVT::v2f64})
316 for (
auto T : {MVT::v2i64, MVT::v2f64})
322 if (Subtarget->hasFP16()) {
334 if (Subtarget->hasFP16()) {
338 if (Subtarget->hasRelaxedSIMD()) {
353 if (!Subtarget->hasSignExt()) {
355 auto Action = Subtarget->hasSIMD128() ?
Custom :
Expand;
356 for (
auto T : {MVT::i8, MVT::i16, MVT::i32})
372 for (
auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
376 if (Subtarget->hasReferenceTypes())
378 for (
auto T : {MVT::externref, MVT::funcref})
383 {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64, MVT::v2f64})
399 if (Subtarget->hasSIMD128()) {
400 for (
auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
403 if (
MVT(
T) != MemT) {
442 return MVT::externref;
451 return MVT::externref;
458WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(
475bool WebAssemblyTargetLowering::shouldScalarizeBinop(
SDValue VecOp)
const {
495FastISel *WebAssemblyTargetLowering::createFastISel(
501MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(
const DataLayout & ,
512 "32-bit shift counts ought to be enough for anyone");
517 "Unable to represent scalar shift amount type");
527 bool IsUnsigned,
bool Int64,
528 bool Float64,
unsigned LoweredOpcode) {
534 unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
535 unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
536 unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
537 unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
538 unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
539 unsigned Eqz = WebAssembly::EQZ_I32;
540 unsigned And = WebAssembly::AND_I32;
541 int64_t Limit = Int64 ?
INT64_MIN : INT32_MIN;
542 int64_t Substitute = IsUnsigned ? 0 : Limit;
543 double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
554 F->insert(It, FalseMBB);
555 F->insert(It, TrueMBB);
556 F->insert(It, DoneMBB);
559 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
567 unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
575 MI.eraseFromParent();
632 if (Def->getOpcode() == WebAssembly::CONST_I32 ||
633 Def->getOpcode() == WebAssembly::CONST_I64) {
634 if (Def->getOperand(1).getImm() == 0) {
636 MI.eraseFromParent();
640 unsigned MemoryCopy =
641 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
648 MI.eraseFromParent();
659 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
660 unsigned MemoryCopy =
661 Int64 ? WebAssembly::MEMORY_COPY_A64 : WebAssembly::MEMORY_COPY_A32;
672 F->insert(It, TrueMBB);
673 F->insert(It, DoneMBB);
676 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
689 MI.eraseFromParent();
723 if (Def->getOpcode() == WebAssembly::CONST_I32 ||
724 Def->getOpcode() == WebAssembly::CONST_I64) {
725 if (Def->getOperand(1).getImm() == 0) {
727 MI.eraseFromParent();
731 unsigned MemoryFill =
732 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
738 MI.eraseFromParent();
749 unsigned Eqz = Int64 ? WebAssembly::EQZ_I64 : WebAssembly::EQZ_I32;
750 unsigned MemoryFill =
751 Int64 ? WebAssembly::MEMORY_FILL_A64 : WebAssembly::MEMORY_FILL_A32;
762 F->insert(It, TrueMBB);
763 F->insert(It, DoneMBB);
766 DoneMBB->
splice(DoneMBB->
begin(), BB, std::next(
MI.getIterator()), BB->
end());
779 MI.eraseFromParent();
801 CallResults.
getOpcode() == WebAssembly::RET_CALL_RESULTS);
805 bool IsRetCall = CallResults.
getOpcode() == WebAssembly::RET_CALL_RESULTS;
807 bool IsFuncrefCall =
false;
813 IsFuncrefCall = (TRC == &WebAssembly::FUNCREFRegClass);
818 if (IsIndirect && IsRetCall) {
819 CallOp = WebAssembly::RET_CALL_INDIRECT;
820 }
else if (IsIndirect) {
821 CallOp = WebAssembly::CALL_INDIRECT;
822 }
else if (IsRetCall) {
823 CallOp = WebAssembly::RET_CALL;
825 CallOp = WebAssembly::CALL;
854 for (
auto Def : CallResults.
defs())
878 for (
auto Use : CallParams.
uses())
894 if (IsIndirect && IsFuncrefCall) {
906 BuildMI(MF,
DL,
TII.get(WebAssembly::REF_NULL_FUNCREF), RegFuncref);
910 BuildMI(MF,
DL,
TII.get(WebAssembly::TABLE_SET_FUNCREF))
922 const TargetInstrInfo &
TII = *Subtarget->getInstrInfo();
925 switch (
MI.getOpcode()) {
928 case WebAssembly::FP_TO_SINT_I32_F32:
930 WebAssembly::I32_TRUNC_S_F32);
931 case WebAssembly::FP_TO_UINT_I32_F32:
933 WebAssembly::I32_TRUNC_U_F32);
934 case WebAssembly::FP_TO_SINT_I64_F32:
936 WebAssembly::I64_TRUNC_S_F32);
937 case WebAssembly::FP_TO_UINT_I64_F32:
939 WebAssembly::I64_TRUNC_U_F32);
940 case WebAssembly::FP_TO_SINT_I32_F64:
942 WebAssembly::I32_TRUNC_S_F64);
943 case WebAssembly::FP_TO_UINT_I32_F64:
945 WebAssembly::I32_TRUNC_U_F64);
946 case WebAssembly::FP_TO_SINT_I64_F64:
948 WebAssembly::I64_TRUNC_S_F64);
949 case WebAssembly::FP_TO_UINT_I64_F64:
951 WebAssembly::I64_TRUNC_U_F64);
952 case WebAssembly::MEMCPY_A32:
954 case WebAssembly::MEMCPY_A64:
956 case WebAssembly::MEMSET_A32:
958 case WebAssembly::MEMSET_A64:
960 case WebAssembly::CALL_RESULTS:
961 case WebAssembly::RET_CALL_RESULTS:
966std::pair<unsigned, const TargetRegisterClass *>
967WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
971 if (Constraint.
size() == 1) {
972 switch (Constraint[0]) {
974 assert(VT != MVT::iPTR &&
"Pointer MVT not expected here");
975 if (Subtarget->hasSIMD128() && VT.
isVector()) {
977 return std::make_pair(0U, &WebAssembly::V128RegClass);
981 return std::make_pair(0U, &WebAssembly::I32RegClass);
983 return std::make_pair(0U, &WebAssembly::I64RegClass);
988 return std::make_pair(0U, &WebAssembly::F32RegClass);
990 return std::make_pair(0U, &WebAssembly::F64RegClass);
1004bool WebAssemblyTargetLowering::isCheapToSpeculateCttz(
Type *Ty)
const {
1009bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz(
Type *Ty)
const {
1014bool WebAssemblyTargetLowering::isLegalAddressingMode(
const DataLayout &
DL,
1016 Type *Ty,
unsigned AS,
1021 if (AM.BaseOffs < 0)
1032bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
1046bool WebAssemblyTargetLowering::isIntDivCheap(
EVT VT,
1047 AttributeList Attr)
const {
1053bool WebAssemblyTargetLowering::isVectorLoadExtDesirable(
SDValue ExtVal)
const {
1061 EVT MemT =
Load->getValueType(0);
1062 return (ExtT == MVT::v8i16 && MemT == MVT::v8i8) ||
1063 (ExtT == MVT::v4i32 && MemT == MVT::v4i16) ||
1064 (ExtT == MVT::v2i64 && MemT == MVT::v2i32);
1067bool WebAssemblyTargetLowering::isOffsetFoldingLegal(
1070 const GlobalValue *GV = GA->
getGlobal();
1074EVT WebAssemblyTargetLowering::getSetCCResultType(
const DataLayout &
DL,
1091void WebAssemblyTargetLowering::getTgtMemIntrinsic(
1095 switch (Intrinsic) {
1096 case Intrinsic::wasm_memory_atomic_notify:
1098 Info.memVT = MVT::i32;
1099 Info.ptrVal =
I.getArgOperand(0);
1111 case Intrinsic::wasm_memory_atomic_wait32:
1113 Info.memVT = MVT::i32;
1114 Info.ptrVal =
I.getArgOperand(0);
1120 case Intrinsic::wasm_memory_atomic_wait64:
1122 Info.memVT = MVT::i64;
1123 Info.ptrVal =
I.getArgOperand(0);
1129 case Intrinsic::wasm_loadf16_f32:
1131 Info.memVT = MVT::f16;
1132 Info.ptrVal =
I.getArgOperand(0);
1138 case Intrinsic::wasm_storef16_f32:
1140 Info.memVT = MVT::f16;
1141 Info.ptrVal =
I.getArgOperand(1);
1152void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
1155 switch (
Op.getOpcode()) {
1159 unsigned IntNo =
Op.getConstantOperandVal(0);
1163 case Intrinsic::wasm_bitmask: {
1165 EVT VT =
Op.getOperand(1).getSimpleValueType();
1168 Known.
Zero |= ZeroMask;
1174 case WebAssemblyISD::EXTEND_LOW_U:
1175 case WebAssemblyISD::EXTEND_HIGH_U: {
1180 if (VT == MVT::v8i8 || VT == MVT::v16i8) {
1184 }
else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
1188 }
else if (VT == MVT::v2i32 || VT == MVT::v4i32) {
1198 case WebAssemblyISD::I64_ADD128:
1199 if (
Op.getResNo() == 1) {
1210WebAssemblyTargetLowering::getPreferredVectorAction(
MVT VT)
const {
1216 if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
1217 EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
1224bool WebAssemblyTargetLowering::isFMAFasterThanFMulAndFAdd(
1226 if (!Subtarget->hasFP16() || !VT.
isVector())
1236bool WebAssemblyTargetLowering::shouldSimplifyDemandedVectorElts(
1237 SDValue Op,
const TargetLoweringOpt &TLO)
const {
1290WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
1292 SelectionDAG &DAG = CLI.DAG;
1302 "WebAssembly doesn't support language-specific or target-specific "
1303 "calling conventions yet");
1304 if (CLI.IsPatchPoint)
1305 fail(
DL, DAG,
"WebAssembly doesn't support patch point yet");
1307 if (CLI.IsTailCall) {
1308 auto NoTail = [&](
const char *Msg) {
1309 if (CLI.CB && CLI.CB->isMustTailCall())
1311 CLI.IsTailCall =
false;
1314 if (!Subtarget->hasTailCall())
1315 NoTail(
"WebAssembly 'tail-call' feature not enabled");
1319 NoTail(
"WebAssembly does not support varargs tail calls");
1324 Type *RetTy =
F.getReturnType();
1329 bool TypesMatch = CallerRetTys.
size() == CalleeRetTys.
size() &&
1330 std::equal(CallerRetTys.
begin(), CallerRetTys.
end(),
1331 CalleeRetTys.
begin());
1333 NoTail(
"WebAssembly tail call requires caller and callee return types to "
1338 for (
auto &Arg : CLI.CB->args()) {
1339 Value *Val = Arg.get();
1344 Src =
GEP->getPointerOperand();
1351 "WebAssembly does not support tail calling with stack arguments");
1358 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1359 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1360 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1366 Outs[0].Flags.isSRet()) {
1371 bool HasSwiftSelfArg =
false;
1372 bool HasSwiftErrorArg =
false;
1373 bool HasSwiftAsyncArg =
false;
1374 unsigned NumFixedArgs = 0;
1375 for (
unsigned I = 0;
I < Outs.
size(); ++
I) {
1376 const ISD::OutputArg &Out = Outs[
I];
1382 fail(
DL, DAG,
"WebAssembly hasn't implemented nest arguments");
1384 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca arguments");
1386 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs arguments");
1388 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last arguments");
1397 Chain = DAG.
getMemcpy(Chain,
DL, FINode, OutVal, SizeNode,
1400 nullptr, std::nullopt, MachinePointerInfo(),
1401 MachinePointerInfo());
1408 bool IsVarArg = CLI.IsVarArg;
1417 if (!HasSwiftSelfArg) {
1419 ISD::ArgFlagsTy
Flags;
1420 Flags.setSwiftSelf();
1421 ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
1422 CLI.Outs.push_back(Arg);
1424 CLI.OutVals.push_back(ArgVal);
1426 if (!HasSwiftErrorArg) {
1428 ISD::ArgFlagsTy
Flags;
1429 Flags.setSwiftError();
1430 ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
1431 CLI.Outs.push_back(Arg);
1433 CLI.OutVals.push_back(ArgVal);
1437 ISD::ArgFlagsTy
Flags;
1438 Flags.setSwiftAsync();
1439 ISD::OutputArg Arg(Flags, PtrVT, EVT(PtrVT), PtrTy, 0, 0);
1440 CLI.Outs.push_back(Arg);
1442 CLI.OutVals.push_back(ArgVal);
1448 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.
getContext());
1453 for (
unsigned I = NumFixedArgs;
I < Outs.
size(); ++
I) {
1454 const ISD::OutputArg &Out = Outs[
I];
1457 assert(VT != MVT::iPTR &&
"Legalized args should be concrete");
1462 CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty), Alignment);
1469 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
1472 if (IsVarArg && NumBytes) {
1475 MaybeAlign StackAlign = Layout.getStackAlignment();
1476 assert(StackAlign &&
"data layout string is missing stack alignment");
1482 assert(ArgLocs[ValNo].getValNo() == ValNo &&
1483 "ArgLocs should remain in order and only hold varargs args");
1484 unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
1492 if (!Chains.
empty())
1494 }
else if (IsVarArg) {
1512 Ops.push_back(Chain);
1513 Ops.push_back(Callee);
1518 IsVarArg ? OutVals.
begin() + NumFixedArgs : OutVals.
end());
1521 Ops.push_back(FINode);
1524 for (
const auto &In : Ins) {
1525 assert(!
In.Flags.isByVal() &&
"byval is not valid for return values");
1526 assert(!
In.Flags.isNest() &&
"nest is not valid for return values");
1527 if (
In.Flags.isInAlloca())
1528 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca return values");
1529 if (
In.Flags.isInConsecutiveRegs())
1530 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs return values");
1531 if (
In.Flags.isInConsecutiveRegsLast())
1533 "WebAssembly hasn't implemented cons regs last return values");
1542 CLI.CB->getCalledOperand()->getType())) {
1557 WebAssemblyISD::TABLE_SET,
DL, DAG.
getVTList(MVT::Other), TableSetOps,
1562 CLI.CB->getCalledOperand()->getPointerAlignment(DAG.
getDataLayout()),
1568 if (CLI.IsTailCall) {
1570 SDVTList NodeTys = DAG.
getVTList(MVT::Other, MVT::Glue);
1575 SDVTList InTyList = DAG.
getVTList(InTys);
1578 for (
size_t I = 0;
I < Ins.size(); ++
I)
1585bool WebAssemblyTargetLowering::CanLowerReturn(
1588 const Type *RetTy)
const {
1593SDValue WebAssemblyTargetLowering::LowerReturn(
1599 "MVP WebAssembly can only return up to one value");
1601 fail(
DL, DAG,
"WebAssembly doesn't support non-C calling conventions");
1604 RetOps.append(OutVals.
begin(), OutVals.
end());
1605 Chain = DAG.
getNode(WebAssemblyISD::RETURN,
DL, MVT::Other, RetOps);
1608 for (
const ISD::OutputArg &Out : Outs) {
1613 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca results");
1615 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs results");
1617 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last results");
1623SDValue WebAssemblyTargetLowering::LowerFormalArguments(
1628 fail(
DL, DAG,
"WebAssembly doesn't support non-C calling conventions");
1631 auto *MFI = MF.
getInfo<WebAssemblyFunctionInfo>();
1637 bool HasSwiftErrorArg =
false;
1638 bool HasSwiftSelfArg =
false;
1639 bool HasSwiftAsyncArg =
false;
1640 for (
const ISD::InputArg &In : Ins) {
1641 HasSwiftSelfArg |=
In.Flags.isSwiftSelf();
1642 HasSwiftErrorArg |=
In.Flags.isSwiftError();
1643 HasSwiftAsyncArg |=
In.Flags.isSwiftAsync();
1644 if (
In.Flags.isInAlloca())
1645 fail(
DL, DAG,
"WebAssembly hasn't implemented inalloca arguments");
1646 if (
In.Flags.isNest())
1647 fail(
DL, DAG,
"WebAssembly hasn't implemented nest arguments");
1648 if (
In.Flags.isInConsecutiveRegs())
1649 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs arguments");
1650 if (
In.Flags.isInConsecutiveRegsLast())
1651 fail(
DL, DAG,
"WebAssembly hasn't implemented cons regs last arguments");
1660 MFI->addParam(
In.VT);
1669 if (!HasSwiftSelfArg) {
1670 MFI->addParam(PtrVT);
1672 if (!HasSwiftErrorArg) {
1673 MFI->addParam(PtrVT);
1676 MFI->addParam(PtrVT);
1685 MFI->setVarargBufferVreg(VarargVreg);
1687 Chain,
DL, VarargVreg,
1688 DAG.
getNode(WebAssemblyISD::ARGUMENT,
DL, PtrVT,
1690 MFI->addParam(PtrVT);
1702 assert(MFI->getParams().size() == Params.
size() &&
1703 std::equal(MFI->getParams().begin(), MFI->getParams().end(),
1709void WebAssemblyTargetLowering::ReplaceNodeResults(
1711 switch (
N->getOpcode()) {
1725 EVT VT =
N->getValueType(0);
1727 if (VT == MVT::v4f16 && Src.getValueType() == MVT::v4f32) {
1729 DAG.
getNode(WebAssemblyISD::DEMOTE_ZERO, SDLoc(
N), MVT::v8f16, Src));
1735 Results.push_back(Replace128Op(
N, DAG));
1739 "ReplaceNodeResults not implemented for this op for WebAssembly!");
1750 switch (
Op.getOpcode()) {
1755 return LowerFrameIndex(
Op, DAG);
1757 return LowerGlobalAddress(
Op, DAG);
1759 return LowerGlobalTLSAddress(
Op, DAG);
1761 return LowerExternalSymbol(
Op, DAG);
1763 return LowerJumpTable(
Op, DAG);
1765 return LowerBR_JT(
Op, DAG);
1767 return LowerVASTART(
Op, DAG);
1770 fail(
DL, DAG,
"WebAssembly hasn't implemented computed gotos");
1773 return LowerRETURNADDR(
Op, DAG);
1775 return LowerFRAMEADDR(
Op, DAG);
1777 return LowerCopyToReg(
Op, DAG);
1780 return LowerAccessVectorElement(
Op, DAG);
1784 return LowerIntrinsic(
Op, DAG);
1786 return LowerSIGN_EXTEND_INREG(
Op, DAG);
1790 return LowerEXTEND_VECTOR_INREG(
Op, DAG);
1792 return LowerBUILD_VECTOR(
Op, DAG);
1794 return LowerVECTOR_SHUFFLE(
Op, DAG);
1796 return LowerSETCC(
Op, DAG);
1800 return LowerShift(
Op, DAG);
1803 return LowerFP_TO_INT_SAT(
Op, DAG);
1806 return LowerFMIN(
Op, DAG);
1809 return LowerFMAX(
Op, DAG);
1811 return LowerLoad(
Op, DAG);
1813 return LowerStore(
Op, DAG);
1822 return LowerMUL_LOHI(
Op, DAG);
1824 return LowerUADDO(
Op, DAG);
1839 return std::nullopt;
1858 SDVTList Tys = DAG.
getVTList(MVT::Other);
1870 SDVTList Tys = DAG.
getVTList(MVT::Other);
1872 return DAG.
getNode(WebAssemblyISD::LOCAL_SET,
DL, Tys,
Ops);
1877 "Encountered an unlowerable store to the wasm_var address space",
1893 "unexpected offset when loading from webassembly global",
false);
1904 "unexpected offset when loading from webassembly local",
false);
1908 return DAG.
getNode(WebAssemblyISD::LOCAL_GET,
DL, {LocalVT, MVT::Other},
1914 "Encountered an unlowerable load from the wasm_var address space",
1922 assert(Subtarget->hasWideArithmetic());
1923 assert(
Op.getValueType() == MVT::i64);
1926 switch (
Op.getOpcode()) {
1928 Opcode = WebAssemblyISD::I64_MUL_WIDE_U;
1931 Opcode = WebAssemblyISD::I64_MUL_WIDE_S;
1952 assert(Subtarget->hasWideArithmetic());
1953 assert(
Op.getValueType() == MVT::i64);
1960 DAG.
getNode(WebAssemblyISD::I64_ADD128,
DL,
1970 assert(Subtarget->hasWideArithmetic());
1971 assert(
N->getValueType(0) == MVT::i128);
1974 switch (
N->getOpcode()) {
1976 Opcode = WebAssemblyISD::I64_ADD128;
1979 Opcode = WebAssemblyISD::I64_SUB128;
1994 LHS_0, LHS_1, RHS_0, RHS_1);
2011 EVT VT = Src.getValueType();
2013 : WebAssembly::COPY_I64,
2016 return Op.getNode()->getNumValues() == 1
2035 if (!Subtarget->getTargetTriple().isOSEmscripten()) {
2037 "Non-Emscripten WebAssembly hasn't implemented "
2038 "__builtin_return_address");
2042 unsigned Depth =
Op.getConstantOperandVal(0);
2044 return makeLibCall(DAG, RTLIB::RETURN_ADDRESS,
Op.getValueType(),
2045 {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions,
DL)
2054 if (
Op.getConstantOperandVal(0) > 0)
2058 EVT VT =
Op.getValueType();
2065WebAssemblyTargetLowering::LowerGlobalTLSAddress(
SDValue Op,
2071 if (!MF.
getSubtarget<WebAssemblySubtarget>().hasBulkMemory())
2075 const GlobalValue *GV = GA->
getGlobal();
2080 auto model = Subtarget->getTargetTriple().isOSEmscripten()
2095 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2096 : WebAssembly::GLOBAL_GET_I32;
2107 DAG.
getNode(WebAssemblyISD::WrapperREL,
DL, PtrVT, TLSOffset);
2114 EVT VT =
Op.getValueType();
2115 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2125 EVT VT =
Op.getValueType();
2127 "Unexpected target flags on generic GlobalAddressSDNode");
2129 fail(
DL, DAG,
"Invalid address space for WebAssembly target");
2132 const GlobalValue *GV = GA->
getGlobal();
2140 const char *BaseName;
2149 DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
2153 WebAssemblyISD::WrapperREL,
DL, VT,
2162 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2168WebAssemblyTargetLowering::LowerExternalSymbol(
SDValue Op,
2172 EVT VT =
Op.getValueType();
2173 assert(ES->getTargetFlags() == 0 &&
2174 "Unexpected target flags on generic ExternalSymbolSDNode");
2175 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, VT,
2198 Ops.push_back(Chain);
2199 Ops.push_back(Index);
2205 for (
auto *
MBB : MBBs)
2212 return DAG.
getNode(WebAssemblyISD::BR_TABLE,
DL, MVT::Other,
Ops);
2224 MFI->getVarargBufferVreg(), PtrVT);
2225 return DAG.
getStore(
Op.getOperand(0),
DL, ArgN,
Op.getOperand(1),
2226 MachinePointerInfo(SV));
2233 switch (
Op.getOpcode()) {
2236 IntNo =
Op.getConstantOperandVal(1);
2239 IntNo =
Op.getConstantOperandVal(0);
2250 case Intrinsic::wasm_lsda: {
2259 DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT,
2262 DAG.
getNode(WebAssemblyISD::WrapperREL,
DL, PtrVT, Node);
2266 return DAG.
getNode(WebAssemblyISD::Wrapper,
DL, PtrVT, Node);
2269 case Intrinsic::wasm_shuffle: {
2275 while (
OpIdx < 18) {
2284 return DAG.
getNode(WebAssemblyISD::SHUFFLE,
DL,
Op.getValueType(),
Ops);
2287 case Intrinsic::thread_pointer: {
2289 auto GlobalGet = PtrVT == MVT::i64 ? WebAssembly::GLOBAL_GET_I64
2290 : WebAssembly::GLOBAL_GET_I32;
2301WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(
SDValue Op,
2311 assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
2315 const SDValue &Extract =
Op.getOperand(0);
2319 MVT ExtractedLaneT =
2323 if (ExtractedVecT == VecT)
2330 unsigned IndexVal =
Index->getAsZExtVal();
2349 assert((UserOpc == WebAssemblyISD::EXTEND_LOW_U ||
2350 UserOpc == WebAssemblyISD::EXTEND_LOW_S) &&
2351 "expected extend_low");
2356 size_t FirstIdx = Mask.size() / 2;
2357 for (
size_t i = 0; i < Mask.size() / 2; ++i) {
2358 if (Mask[i] !=
static_cast<int>(FirstIdx + i)) {
2364 unsigned Opc = UserOpc == WebAssemblyISD::EXTEND_LOW_S
2365 ? WebAssemblyISD::EXTEND_HIGH_S
2366 : WebAssemblyISD::EXTEND_HIGH_U;
2369 ShuffleSrc = DAG.
getBitcast(
Op.getValueType(), ShuffleSrc);
2375WebAssemblyTargetLowering::LowerEXTEND_VECTOR_INREG(
SDValue Op,
2378 EVT VT =
Op.getValueType();
2380 EVT SrcVT = Src.getValueType();
2387 "Unexpected extension factor.");
2390 if (Scale != 2 && Scale != 4 && Scale != 8)
2394 switch (
Op.getOpcode()) {
2399 Ext = WebAssemblyISD::EXTEND_LOW_U;
2402 Ext = WebAssemblyISD::EXTEND_LOW_S;
2413 while (Scale != 1) {
2427 if (
Op.getValueType() != MVT::v2f64 &&
Op.getValueType() != MVT::v4f32)
2431 unsigned &Index) ->
bool {
2432 switch (
Op.getOpcode()) {
2434 Opcode = WebAssemblyISD::CONVERT_LOW_S;
2437 Opcode = WebAssemblyISD::CONVERT_LOW_U;
2441 Opcode = WebAssemblyISD::PROMOTE_LOW;
2447 auto ExtractVector =
Op.getOperand(0);
2454 SrcVec = ExtractVector.getOperand(0);
2455 Index = ExtractVector.getConstantOperandVal(1);
2459 unsigned NumLanes =
Op.getValueType() == MVT::v2f64 ? 2 : 4;
2460 unsigned FirstOpcode = 0, SecondOpcode = 0, ThirdOpcode = 0, FourthOpcode = 0;
2461 unsigned FirstIndex = 0, SecondIndex = 0, ThirdIndex = 0, FourthIndex = 0;
2462 SDValue FirstSrcVec, SecondSrcVec, ThirdSrcVec, FourthSrcVec;
2464 if (!GetConvertedLane(
Op.getOperand(0), FirstOpcode, FirstSrcVec,
2466 !GetConvertedLane(
Op.getOperand(1), SecondOpcode, SecondSrcVec,
2471 if (NumLanes == 4 && (!GetConvertedLane(
Op.getOperand(2), ThirdOpcode,
2472 ThirdSrcVec, ThirdIndex) ||
2473 !GetConvertedLane(
Op.getOperand(3), FourthOpcode,
2474 FourthSrcVec, FourthIndex)))
2477 if (FirstOpcode != SecondOpcode)
2483 if (NumLanes == 4 &&
2484 (FirstOpcode != ThirdOpcode || FirstOpcode != FourthOpcode ||
2485 FirstSrcVec != SecondSrcVec || FirstSrcVec != ThirdSrcVec ||
2486 FirstSrcVec != FourthSrcVec || FirstIndex != 0 || SecondIndex != 1 ||
2487 ThirdIndex != 2 || FourthIndex != 3))
2491 switch (FirstOpcode) {
2492 case WebAssemblyISD::CONVERT_LOW_S:
2493 case WebAssemblyISD::CONVERT_LOW_U:
2494 ExpectedSrcVT = MVT::v4i32;
2496 case WebAssemblyISD::PROMOTE_LOW:
2497 ExpectedSrcVT = NumLanes == 2 ? MVT::v4f32 : MVT::v8i16;
2503 auto Src = FirstSrcVec;
2504 if (NumLanes == 2 &&
2505 (FirstIndex != 0 || SecondIndex != 1 || FirstSrcVec != SecondSrcVec)) {
2508 {
static_cast<int>(FirstIndex),
2509 static_cast<int>(SecondIndex) + 4, -1, -1});
2511 return DAG.
getNode(FirstOpcode,
DL, NumLanes == 2 ? MVT::v2f64 : MVT::v4f32,
2517 MVT VT =
Op.getSimpleValueType();
2518 if (VT == MVT::v8f16) {
2533 const EVT VecT =
Op.getValueType();
2534 const EVT LaneT =
Op.getOperand(0).getValueType();
2536 bool CanSwizzle = VecT == MVT::v16i8;
2557 auto GetSwizzleSrcs = [](
size_t I,
const SDValue &Lane) {
2561 const SDValue &SwizzleSrc = Lane->getOperand(0);
2562 const SDValue &IndexExt = Lane->getOperand(1);
2572 Index->getConstantOperandVal(1) !=
I)
2574 return std::make_pair(SwizzleSrc, SwizzleIndices);
2581 auto GetShuffleSrc = [&](
const SDValue &Lane) {
2586 if (Lane->getOperand(0).getValueType().getVectorNumElements() >
2589 return Lane->getOperand(0);
2592 using ValueEntry = std::pair<SDValue, size_t>;
2595 using SwizzleEntry = std::pair<std::pair<SDValue, SDValue>,
size_t>;
2598 using ShuffleEntry = std::pair<SDValue, size_t>;
2601 auto AddCount = [](
auto &Counts,
const auto &Val) {
2604 if (CountIt == Counts.end()) {
2605 Counts.emplace_back(Val, 1);
2611 auto GetMostCommon = [](
auto &Counts) {
2613 assert(CommonIt != Counts.end() &&
"Unexpected all-undef build_vector");
2617 size_t NumConstantLanes = 0;
2620 for (
size_t I = 0;
I < Lanes; ++
I) {
2625 AddCount(SplatValueCounts, Lane);
2629 if (
auto ShuffleSrc = GetShuffleSrc(Lane))
2630 AddCount(ShuffleCounts, ShuffleSrc);
2632 auto SwizzleSrcs = GetSwizzleSrcs(
I, Lane);
2633 if (SwizzleSrcs.first)
2634 AddCount(SwizzleCounts, SwizzleSrcs);
2639 size_t NumSplatLanes;
2640 std::tie(SplatValue, NumSplatLanes) = GetMostCommon(SplatValueCounts);
2644 size_t NumSwizzleLanes = 0;
2645 if (SwizzleCounts.
size())
2646 std::forward_as_tuple(std::tie(SwizzleSrc, SwizzleIndices),
2647 NumSwizzleLanes) = GetMostCommon(SwizzleCounts);
2651 SDValue ShuffleSrc1, ShuffleSrc2;
2652 size_t NumShuffleLanes = 0;
2653 if (ShuffleCounts.
size()) {
2654 std::tie(ShuffleSrc1, NumShuffleLanes) = GetMostCommon(ShuffleCounts);
2656 [&](
const auto &Pair) {
return Pair.first == ShuffleSrc1; });
2658 if (ShuffleCounts.
size()) {
2659 size_t AdditionalShuffleLanes;
2660 std::tie(ShuffleSrc2, AdditionalShuffleLanes) =
2661 GetMostCommon(ShuffleCounts);
2662 NumShuffleLanes += AdditionalShuffleLanes;
2667 std::function<bool(
size_t,
const SDValue &)> IsLaneConstructed;
2670 if (NumSwizzleLanes >= NumShuffleLanes &&
2671 NumSwizzleLanes >= NumConstantLanes && NumSwizzleLanes >= NumSplatLanes) {
2674 auto Swizzled = std::make_pair(SwizzleSrc, SwizzleIndices);
2675 IsLaneConstructed = [&, Swizzled](
size_t I,
const SDValue &Lane) {
2676 return Swizzled == GetSwizzleSrcs(
I, Lane);
2678 }
else if (NumShuffleLanes >= NumConstantLanes &&
2679 NumShuffleLanes >= NumSplatLanes) {
2689 assert(LaneSize > DestLaneSize);
2690 Scale1 = LaneSize / DestLaneSize;
2696 assert(LaneSize > DestLaneSize);
2697 Scale2 = LaneSize / DestLaneSize;
2702 assert(DestLaneCount <= 16);
2703 for (
size_t I = 0;
I < DestLaneCount; ++
I) {
2705 SDValue Src = GetShuffleSrc(Lane);
2706 if (Src == ShuffleSrc1) {
2708 }
else if (Src && Src == ShuffleSrc2) {
2714 ArrayRef<int> MaskRef(Mask, DestLaneCount);
2716 IsLaneConstructed = [&](size_t,
const SDValue &Lane) {
2717 auto Src = GetShuffleSrc(Lane);
2718 return Src == ShuffleSrc1 || (Src && Src == ShuffleSrc2);
2720 }
else if (NumConstantLanes >= NumSplatLanes) {
2722 for (
const SDValue &Lane :
Op->op_values()) {
2728 uint64_t LaneBits = 128 / Lanes;
2731 Const->getAPIntValue().trunc(LaneBits).getZExtValue(),
2732 SDLoc(Lane), LaneT));
2748 if (NumSplatLanes == 1 &&
Op->getOperand(0) == SplatValue &&
2749 (DestLaneSize == 32 || DestLaneSize == 64)) {
2756 IsLaneConstructed = [&SplatValue](
size_t _,
const SDValue &Lane) {
2757 return Lane == SplatValue;
2762 assert(IsLaneConstructed);
2765 for (
size_t I = 0;
I < Lanes; ++
I) {
2767 if (!Lane.
isUndef() && !IsLaneConstructed(
I, Lane))
2776WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(
SDValue Op,
2780 MVT VecType =
Op.getOperand(0).getSimpleValueType();
2791 for (
int M : Mask) {
2792 for (
size_t J = 0; J < LaneBytes; ++J) {
2796 uint64_t ByteIndex =
M == -1 ? J : (uint64_t)M * LaneBytes + J;
2801 return DAG.
getNode(WebAssemblyISD::SHUFFLE,
DL,
Op.getValueType(),
Ops);
2809 assert(
Op->getOperand(0)->getSimpleValueType(0) == MVT::v2i64);
2814 auto MakeLane = [&](
unsigned I) {
2820 {MakeLane(0), MakeLane(1)});
2824WebAssemblyTargetLowering::LowerAccessVectorElement(
SDValue Op,
2841 EVT LaneT =
Op.getSimpleValueType().getVectorElementType();
2843 if (LaneT.
bitsGE(MVT::i32))
2847 size_t NumLanes =
Op.getSimpleValueType().getVectorNumElements();
2849 unsigned ShiftOpcode =
Op.getOpcode();
2855 for (
size_t i = 0; i < NumLanes; ++i) {
2858 SDValue ShiftedValue = ShiftedElements[i];
2863 DAG.
getNode(ShiftOpcode,
DL, MVT::i32, ShiftedValue, MaskedShiftValue));
2872 assert(
Op.getSimpleValueType().isVector());
2874 uint64_t LaneBits =
Op.getValueType().getScalarSizeInBits();
2875 auto ShiftVal =
Op.getOperand(1);
2878 auto SkipImpliedMask = [](
SDValue MaskOp, uint64_t MaskBits) {
2889 MaskVal == MaskBits)
2896 if (ConstantRHS && ConstantRHS->getAPIntValue() == MaskBits)
2904 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2910 ShiftVal = SkipImpliedMask(ShiftVal, LaneBits - 1);
2915 switch (
Op.getOpcode()) {
2917 Opcode = WebAssemblyISD::VEC_SHL;
2920 Opcode = WebAssemblyISD::VEC_SHR_S;
2923 Opcode = WebAssemblyISD::VEC_SHR_U;
2929 return DAG.
getNode(Opcode,
DL,
Op.getValueType(),
Op.getOperand(0), ShiftVal);
2934 EVT ResT =
Op.getValueType();
2937 if ((ResT == MVT::i32 || ResT == MVT::i64) &&
2938 (SatVT == MVT::i32 || SatVT == MVT::i64))
2941 if (ResT == MVT::v4i32 && SatVT == MVT::i32)
2944 if (ResT == MVT::v8i16 && SatVT == MVT::i16)
2951 return (
Op->getFlags().hasNoNaNs() ||
2954 (
Op->getFlags().hasNoSignedZeros() ||
2962 return DAG.
getNode(WebAssemblyISD::RELAXED_FMIN, SDLoc(
Op),
2963 Op.getValueType(),
Op.getOperand(0),
Op.getOperand(1));
2971 return DAG.
getNode(WebAssemblyISD::RELAXED_FMAX, SDLoc(
Op),
2972 Op.getValueType(),
Op.getOperand(0),
Op.getOperand(1));
2982 auto &DAG = DCI.
DAG;
2989 SDValue Bitcast =
N->getOperand(0);
2992 if (!
N->getOperand(1).isUndef())
2994 SDValue CastOp = Bitcast.getOperand(0);
2996 EVT DstType = Bitcast.getValueType();
2997 if (!SrcType.is128BitVector() ||
2998 SrcType.getVectorNumElements() != DstType.getVectorNumElements())
3001 SrcType,
SDLoc(
N), CastOp, DAG.
getUNDEF(SrcType), Shuffle->getMask());
3011 auto &DAG = DCI.
DAG;
3015 EVT InVT =
N->getOperand(0)->getValueType(0);
3016 EVT ResVT =
N->getValueType(0);
3018 if (ResVT == MVT::v4f32 && (InVT == MVT::v4i16 || InVT == MVT::v4i8))
3020 else if (ResVT == MVT::v2f64 && (InVT == MVT::v2i16 || InVT == MVT::v2i8))
3034 auto &DAG = DCI.
DAG;
3038 EVT VT =
N->getValueType(0);
3052 auto &DAG = DCI.
DAG;
3056 EVT ResVT =
N->getValueType(0);
3060 if (ResVT == MVT::v16i32 &&
N->getOperand(0)->getValueType(0) == MVT::v16i8) {
3064 IsSext ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
3066 IsSext ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;
3071 DAG.
getNode(LowOp,
DL, MVT::v4i32, LowHalf),
3072 DAG.
getNode(HighOp,
DL, MVT::v4i32, LowHalf),
3073 DAG.
getNode(LowOp,
DL, MVT::v4i32, HighHalf),
3074 DAG.
getNode(HighOp,
DL, MVT::v4i32, HighHalf),
3081 auto Extract =
N->getOperand(0);
3086 if (IndexNode ==
nullptr)
3088 auto Index = IndexNode->getZExtValue();
3092 if (ResVT == MVT::v8i16) {
3094 Source.getValueType() != MVT::v16i8 || (Index != 0 && Index != 8))
3096 }
else if (ResVT == MVT::v4i32) {
3098 Source.getValueType() != MVT::v8i16 || (Index != 0 && Index != 4))
3100 }
else if (ResVT == MVT::v2i64) {
3102 Source.getValueType() != MVT::v4i32 || (Index != 0 && Index != 2))
3108 bool IsLow = Index == 0;
3110 unsigned Op = IsSext ? (IsLow ? WebAssemblyISD::EXTEND_LOW_S
3111 : WebAssemblyISD::EXTEND_HIGH_S)
3112 : (IsLow ? WebAssemblyISD::EXTEND_LOW_U
3113 : WebAssemblyISD::EXTEND_HIGH_U);
3120 auto &DAG = DCI.
DAG;
3122 auto GetWasmConversionOp = [](
unsigned Op) {
3125 return WebAssemblyISD::TRUNC_SAT_ZERO_S;
3127 return WebAssemblyISD::TRUNC_SAT_ZERO_U;
3129 return WebAssemblyISD::DEMOTE_ZERO;
3134 auto IsZeroSplat = [](
SDValue SplatVal) {
3136 APInt SplatValue, SplatUndef;
3137 unsigned SplatBitSize;
3142 Splat->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
3161 EVT ExpectedConversionType;
3164 switch (ConversionOp) {
3168 ExpectedConversionType = MVT::v2i32;
3171 if (
Conversion.getValueType() == MVT::v2f32) {
3173 ExpectedConversionType = MVT::v2f32;
3174 }
else if (
Conversion.getValueType() == MVT::v4f16) {
3176 ExpectedConversionType = MVT::v4f16;
3185 if (
N->getValueType(0) != ResVT)
3188 if (
Conversion.getValueType() != ExpectedConversionType)
3192 if (!((Source.getValueType() == MVT::v2f64 && ResVT == MVT::v4f32) ||
3193 (Source.getValueType() == MVT::v2f64 && ResVT == MVT::v4i32) ||
3194 (Source.getValueType() == MVT::v4f32 && ResVT == MVT::v8f16)))
3197 if (!IsZeroSplat(
N->getOperand(1)) ||
3198 N->getOperand(1).getValueType() != ExpectedConversionType)
3201 unsigned Op = GetWasmConversionOp(ConversionOp);
3218 auto ConversionOp =
N->getOpcode();
3219 switch (ConversionOp) {
3225 ResVT =
N->getValueType(0);
3231 if (
N->getValueType(0) != ResVT)
3234 auto Concat =
N->getOperand(0);
3238 EVT SourceVT =
Concat.getOperand(0).getValueType();
3240 if (!IsZeroSplat(
Concat.getOperand(1)))
3245 ConcatVT == MVT::v4f64 && SourceVT == MVT::v2f64 && ResVT == MVT::v4f32;
3247 ConcatVT == MVT::v8f32 && SourceVT == MVT::v4f32 && ResVT == MVT::v8f16;
3248 if (!(IsF64ToF32 || IsF32ToF16))
3251 if (ConcatVT != MVT::v4f64 || SourceVT != MVT::v2f64 || ResVT != MVT::v4i32)
3255 unsigned Op = GetWasmConversionOp(ConversionOp);
3261 const SDLoc &
DL,
unsigned VectorWidth) {
3269 unsigned ElemsPerChunk = VectorWidth / ElVT.
getSizeInBits();
3274 IdxVal &= ~(ElemsPerChunk - 1);
3279 Vec->
ops().slice(IdxVal, ElemsPerChunk));
3291 EVT SrcVT = In.getValueType();
3309 EVT InVT = MVT::i16, OutVT = MVT::i8;
3314 unsigned SubSizeInBits = SrcSizeInBits / 2;
3316 OutVT =
EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
3342 auto &DAG = DCI.
DAG;
3345 EVT InVT = In.getValueType();
3349 EVT OutVT =
N->getValueType(0);
3356 if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
3357 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && OutVT.
is128BitVector()))
3370 auto &DAG = DCI.
DAG;
3373 EVT VT =
N->getValueType(0);
3374 EVT SrcVT = Src.getValueType();
3385 if (NumElts == 2 || NumElts == 4 || NumElts == 8 || NumElts == 16) {
3388 {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32),
3389 DAG.getSExtOrTrunc(N->getOperand(0), DL,
3390 SrcVT.changeVectorElementType(
3391 *DAG.getContext(), Width))}),
3396 if (NumElts == 32 || NumElts == 64) {
3409 EVT ConcatOperandVT =
Concat.getOperand(0).getValueType();
3412 EVT ConcatOperandMaskVT =
3415 EVT ConcatOperandBitmaskVT =
3417 EVT ReturnVT =
N->getValueType(0);
3427 "concat_vectors operands must have the same type");
3431 if (!SetCCVectorOperand ||
3441 DL, ConcatOperandMaskVT, ConcatOperand, SetCCVectorOperand, SetCond);
3442 SDValue ConcatOperandBitmask =
3443 DAG.
getBitcast(ConcatOperandBitmaskVT, ConcatOperandMask);
3444 SDValue ExtendedConcatOperandBitmask =
3449 ReconstructedBitmask = DAG.
getNode(
3450 ISD::SHL,
DL, ReturnVT, ReconstructedBitmask,
3455 ReconstructedBitmask =
3457 ExtendedConcatOperandBitmask);
3460 return ReconstructedBitmask;
3471 if (
N->getConstantOperandVal(0) != Intrinsic::wasm_bitmask)
3482 {DAG.getConstant(Intrinsic::wasm_bitmask, DL, MVT::i32), LHS});
3494 if (
N->getNumOperands() < 2 ||
3498 EVT LT =
LHS.getValueType();
3499 if (LT.getScalarSizeInBits() > 128 / LT.getVectorNumElements())
3502 auto CombineSetCC = [&
N, &DAG](Intrinsic::WASMIntrinsics InPre,
3504 Intrinsic::WASMIntrinsics InPost) {
3505 if (
N->getConstantOperandVal(0) != InPre)
3523 Intrinsic::wasm_alltrue))
3526 Intrinsic::wasm_anytrue))
3529 Intrinsic::wasm_anytrue))
3532 Intrinsic::wasm_alltrue))
3538template <
int MatchRHS,
ISD::CondCode MatchCond,
bool RequiresNegate,
3570 EVT VT =
N->getValueType(0);
3571 EVT OpVT =
X.getValueType();
3575 Attribute::NoImplicitFloat))
3581 !Subtarget->
hasSIMD128() || !isIntEqualitySetCC(CC))
3585 auto IsVectorBitCastCheap = [](
SDValue X) {
3590 if (!IsVectorBitCastCheap(
X) || !IsVectorBitCastCheap(
Y))
3600 : Intrinsic::wasm_anytrue,
3614 EVT VT =
N->getValueType(0);
3625 EVT FromVT =
LHS->getOperand(0).getValueType();
3630 if (NumElts != 2 && NumElts != 4 && NumElts != 8 && NumElts != 16)
3636 auto &DAG = DCI.
DAG;
3667 EVT VT =
N->getValueType(0);
3668 if (VT != MVT::v8i32 && VT != MVT::v16i32)
3674 if (
LHS.getOpcode() !=
RHS.getOpcode())
3681 if (
LHS->getOperand(0).getValueType() !=
RHS->getOperand(0).getValueType())
3684 EVT FromVT =
LHS->getOperand(0).getValueType();
3686 if (EltTy != MVT::i8)
3714 unsigned ExtendLowOpc =
3715 IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
3716 unsigned ExtendHighOpc =
3717 IsSigned ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;
3719 auto GetExtendLow = [&DAG, &
DL, &ExtendLowOpc](
EVT VT,
SDValue Op) {
3726 if (NumElts == 16) {
3727 SDValue LowLHS = GetExtendLow(MVT::v8i16, ExtendInLHS);
3728 SDValue LowRHS = GetExtendLow(MVT::v8i16, ExtendInRHS);
3734 GetExtendLow(MVT::v4i32, MulLow),
3736 GetExtendLow(MVT::v4i32, MulHigh),
3745 SDValue Lo = GetExtendLow(MVT::v4i32, MulLow);
3755 EVT VT =
N->getValueType(0);
3764 if (VT != MVT::v8i8 && VT != MVT::v16i8)
3771 EVT MulVT = MVT::v8i16;
3773 if (VT == MVT::v8i8) {
3779 DAG.
getNode(WebAssemblyISD::EXTEND_LOW_U,
DL, MulVT, PromotedLHS);
3781 DAG.
getNode(WebAssemblyISD::EXTEND_LOW_U,
DL, MulVT, PromotedRHS);
3786 MVT::v16i8,
DL, MulLow, DAG.
getUNDEF(MVT::v16i8),
3787 {0, 2, 4, 6, 8, 10, 12, 14, -1, -1, -1, -1, -1, -1, -1, -1});
3790 assert(VT == MVT::v16i8 &&
"Expected v16i8");
3794 DAG.
getNode(WebAssemblyISD::EXTEND_HIGH_U,
DL, MulVT,
LHS);
3796 DAG.
getNode(WebAssemblyISD::EXTEND_HIGH_U,
DL, MulVT,
RHS);
3805 VT,
DL, MulLow, MulHigh,
3806 {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30});
3814 EVT InVT = In.getValueType();
3819 if (NumElems < RequiredNumElems) {
3826 EVT OutVT =
N->getValueType(0);
3831 if (OutElTy != MVT::i8 && OutElTy != MVT::i16)
3838 EVT FPVT =
N->getOperand(0)->getValueType(0);
3856 EVT NarrowedVT = OutElTy == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
3884 EVT VT =
N->getValueType(0);
3885 if (VT != MVT::v8i32)
3890 unsigned ExtOpc =
LHS.getOpcode();
3900 if (FromVT != MVT::v8i16)
3908 for (
unsigned I = 0;
I < NumElts; ++
I) {
3913 const APInt &ShiftAmt =
C->getAPIntValue();
3914 if (ShiftAmt.
uge(MaxValidShift))
3923 unsigned ExtLowOpc =
3924 IsSigned ? WebAssemblyISD::EXTEND_LOW_S : WebAssemblyISD::EXTEND_LOW_U;
3925 unsigned ExtHighOpc =
3926 IsSigned ? WebAssemblyISD::EXTEND_HIGH_S : WebAssemblyISD::EXTEND_HIGH_U;
3928 EVT HalfVT = MVT::v4i32;
3939WebAssemblyTargetLowering::PerformDAGCombine(
SDNode *
N,
3940 DAGCombinerInfo &DCI)
const {
3941 switch (
N->getOpcode()) {
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static SDValue performTruncateCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performSETCCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Function Alias Analysis Results
static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg, SDValue Val={})
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
const HexagonInstrInfo * TII
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
static SDValue performVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG, const RISCVSubtarget &Subtarget, const RISCVTargetLowering &TLI)
static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y, ISD::CondCode CC, const SDLoc &DL, SelectionDAG &DAG, const RISCVSubtarget &Subtarget)
Try to map an integer comparison with size > XLEN to vector instructions before type legalization spl...
const SmallVectorImpl< MachineOperand > & Cond
Contains matchers for matching SelectionDAG nodes and values.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool callingConvSupported(CallingConv::ID CallConv)
static MachineBasicBlock * LowerFPToInt(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool IsUnsigned, bool Int64, bool Float64, unsigned LoweredOpcode)
static SDValue TryWideExtMulCombine(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * LowerMemcpy(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static std::optional< unsigned > IsWebAssemblyLocal(SDValue Op, SelectionDAG &DAG)
static SDValue performVectorExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue performVectorNonNegToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG)
static SDValue performAnyAllCombine(SDNode *N, SelectionDAG &DAG)
static MachineBasicBlock * LowerCallResults(MachineInstr &CallResults, DebugLoc DL, MachineBasicBlock *BB, const WebAssemblySubtarget *Subtarget, const TargetInstrInfo &TII)
static SDValue TryMatchTrue(SDNode *N, EVT VecVT, SelectionDAG &DAG)
static SDValue GetExtendHigh(SDValue Op, unsigned UserOpc, EVT VT, SelectionDAG &DAG)
SDValue performConvertFPCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performBitmaskCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performVectorTruncZeroCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool IsWebAssemblyGlobal(SDValue Op)
static MachineBasicBlock * LowerMemset(MachineInstr &MI, DebugLoc DL, MachineBasicBlock *BB, const TargetInstrInfo &TII, bool Int64)
static bool HasNoSignedZerosOrNaNs(SDValue Op, SelectionDAG &DAG)
SDValue DoubleVectorWidth(SDValue In, unsigned RequiredNumElems, SelectionDAG &DAG)
static SDValue performVectorExtendToFPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Convert ({u,s}itofp vec) --> ({u,s}itofp ({s,z}ext vec)) so it doesn't get split up into scalar instr...
static SDValue performShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue LowerConvertLow(SDValue Op, SelectionDAG &DAG)
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, const SDLoc &DL, unsigned VectorWidth)
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue truncateVectorWithNARROW(EVT DstVT, SDValue In, const SDLoc &DL, SelectionDAG &DAG)
This file defines the interfaces that WebAssembly uses to lower LLVM code into a selection DAG.
This file provides WebAssembly-specific target descriptions.
This file declares WebAssembly-specific per-machine-function information.
This file declares the WebAssembly-specific subclass of TargetSubtarget.
This file declares the WebAssembly-specific subclass of TargetMachine.
This file contains the declaration of the WebAssembly-specific type parsing utility functions.
This file contains the declaration of the WebAssembly-specific utility functions.
static constexpr int Concat[]
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
LLVM Basic Block Representation.
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
Diagnostic information for unsupported feature in backend.
static constexpr ElementCount getFixed(ScalarTy MinVal)
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
FunctionType * getFunctionType() const
Returns the FunctionType for me.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
int64_t getOffset() const
LLVM_ABI unsigned getAddressSpace() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
ThreadLocalMode getThreadLocalMode() const
Type * getValueType() const
unsigned getTargetFlags() const
This is an important class for using LLVM in a threaded context.
LLVM_ABI void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
Tracks which library functions to use for a particular subtarget.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
Describe properties that are true of each instruction in the target description file.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
@ INVALID_SIMPLE_VALUE_TYPE
static auto integer_fixedlen_vector_valuetypes()
MVT changeVectorElementType(MVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
bool isFixedLengthVector() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator insertAfter(iterator I, MachineInstr *MI)
Insert MI into the instruction list after I.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
LLVM_ABI int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
const char * createExternalSymbolName(StringRef Name)
Allocate a string and populate it with the given external symbol name.
MCContext & getContext() const
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineJumpTableInfo * getJumpTableInfo() const
getJumpTableInfo - Return the jump table info object for the current function.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addSym(MCSymbol *Sym, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
mop_range defs()
Returns all explicit operands that are register definitions.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
mop_range uses()
Returns all operands which may be register uses.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const std::vector< MachineJumpTableEntry > & getJumpTables() const
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setIsKill(bool Val=true)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(MCRegister Reg, Register vreg=Register())
addLiveIn - Add the specified register as a live-in.
unsigned getAddressSpace() const
Return the address space for the associated pointer.
MachineMemOperand * getMemOperand() const
Return the unique MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
LLVM_ABI bool isKnownNeverLogicalZero(SDValue Op, const APInt &DemandedElts, unsigned Depth=0) const
Test whether the given floating point SDValue (or all elements of it, if it is a vector) is known to ...
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getShiftAmountConstant(uint64_t Val, EVT VT, const SDLoc &DL)
LLVM_ABI SDValue getSplatValue(SDValue V, bool LegalTypes=false)
If V is a splat vector, return its scalar source operand by extracting that element from the source v...
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
LLVM_ABI void ExtractVectorElements(SDValue Op, SmallVectorImpl< SDValue > &Args, unsigned Start=0, unsigned Count=0, EVT EltVT=EVT())
Append the extracted elements from Start to Count out of the vector Op in Args.
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false, SDNodeFlags Flags={})
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI SDValue getBasicBlock(MachineBasicBlock *MBB)
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
const TargetMachine & getTarget() const
LLVM_ABI SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI bool isKnownNeverNaN(SDValue Op, const APInt &DemandedElts, bool SNaN=false, unsigned Depth=0) const
Test whether the given SDValue (or all elements of it, if it is a vector) is known to never be NaN in...
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
MachineFunction & getMachineFunction() const
SDValue getPOISON(EVT VT)
Return a POISON node. POISON does not have a useful SDLoc.
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
const SDValue & getBasePtr() const
const SDValue & getOffset() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT) const
Return the preferred vector type legalization action.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setPartialReduceMLAAction(unsigned Opc, MVT AccVT, MVT InputVT, LegalizeAction Action)
Indicate how a PARTIAL_REDUCE_U/SMLA node with Acc type AccVT and Input type InputVT should be treate...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
virtual bool isBinOp(unsigned Opcode) const
Return true if the node is a math/logic binary operator.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOperationLegalOrCustomOrPromote(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
bool isPositionIndependent() const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
TargetLowering(const TargetLowering &)=delete
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const
Return true if folding a constant offset with the given GlobalAddress is legal.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::LibcallImpl LibcallImpl, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
Primary interface to the complete machine description for the target machine.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
The instances of the Type class are immutable: once they are created, they are never changed.
bool isFunctionTy() const
True if this is an instance of FunctionType.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
A Use represents the edge between a Value definition and its users.
LLVM_ABI const Value * stripPointerCastsAndAliases() const
Strip off pointer casts, all-zero GEPs, address space casts, and aliases.
static std::optional< unsigned > getLocalForStackObject(MachineFunction &MF, int FrameIndex)
bool hasCallIndirectOverlong() const
bool hasReferenceTypes() const
WebAssemblyTargetLowering(const TargetMachine &TM, const WebAssemblySubtarget &STI)
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const override
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const override
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ Swift
Calling convention for Swift.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ CXX_FAST_TLS
Used for access functions.
@ WASM_EmscriptenInvoke
For emscripten __invoke_* functions.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ C
The default llvm calling convention, compatible with C.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ PARTIAL_REDUCE_SMLA
PARTIAL_REDUCE_[U|S]MLA(Accumulator, Input1, Input2) The partial reduction nodes sign or zero extend ...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ FMULADD
FMULADD - Performs a * b + c, with, or without, intermediate rounding.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ BR_CC
BR_CC - Conditional branch.
@ BRIND
BRIND - Indirect branch.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum maximum on two values, following IEEE-754 definition...
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ TargetConstant
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification,...
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ FREEZE
FREEZE - FREEZE(VAL) returns an arbitrary value if VAL is UNDEF (or is evaluated to UNDEF),...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ CLEAR_CACHE
llvm.clear_cache intrinsic Operands: Input Chain, Start Address, End Address Outputs: Output Chain
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ FMINIMUMNUM
FMINIMUMNUM/FMAXIMUMNUM - minimumnum/maximumnum that is same with FMINNUM_IEEE and FMAXNUM_IEEE besid...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
This namespace contains an enum with a value for every intrinsic/builtin function known by LLVM.
OperandFlags
These are flags set on operands, but should be considered private, all access should go through the M...
auto m_Value()
Match an arbitrary value and ignore it.
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
bool sd_match(SDNode *N, const SelectionDAG *DAG, Pattern &&P)
CondCode_match m_SpecificCondCode(ISD::CondCode CC)
Match a conditional code SDNode with a specific ISD::CondCode.
CondCode_match m_CondCode()
Match any conditional code SDNode.
TernaryOpc_match< T0_P, T1_P, T2_P, true, false > m_c_SetCC(const T0_P &LHS, const T1_P &RHS, const T2_P &CC)
MCSymbolWasm * getOrCreateFunctionTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __indirect_function_table, for use in call_indirect and in function bitcasts.
@ WASM_ADDRESS_SPACE_EXTERNREF
@ WASM_ADDRESS_SPACE_FUNCREF
bool isWebAssemblyFuncrefType(const Type *Ty)
Return true if this is a WebAssembly Funcref Type.
bool isWebAssemblyTableType(const Type *Ty)
Return true if the table represents a WebAssembly table type.
MCSymbolWasm * getOrCreateFuncrefCallTableSymbol(MCContext &Ctx, const WebAssemblySubtarget *Subtarget)
Returns the __funcref_call_table, for use in funcref calls when lowered to table.set + call_indirect.
bool isValidAddressSpace(unsigned AS)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo, const LibcallLoweringInfo *libcallLowering)
bool canLowerReturn(size_t ResultSize, const WebAssemblySubtarget *Subtarget)
Returns true if the function's return value(s) can be lowered directly, i.e., not indirectly via a po...
bool isWasmVarAddressSpace(unsigned AS)
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
FunctionAddr VTableAddr Value
void computeSignatureVTs(const FunctionType *Ty, const Function *TargetFunc, const Function &ContextFunc, const TargetMachine &TM, SmallVectorImpl< MVT > &Params, SmallVectorImpl< MVT > &Results)
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
DWARFExpression::Operation Op
auto max_element(R &&Range)
Provide wrappers to std::max_element which take ranges instead of having to pass begin/end explicitly...
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
void computeLegalValueVTs(const WebAssemblyTargetLowering &TLI, LLVMContext &Ctx, const DataLayout &DL, Type *Ty, SmallVectorImpl< MVT > &ValueVTs)
constexpr uint64_t NextPowerOf2(uint64_t A)
Returns the next power of two (in 64-bits) that is strictly greater than A.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
bool isByteSized() const
Return true if the bit size is a multiple of 8.
uint64_t getScalarSizeInBits() const
EVT changeVectorElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a vector type whose attributes match ourselves with the exception of the element type...
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
EVT widenIntegerVectorElementType(LLVMContext &Context) const
Return a VT for an integer vector type with the size of the elements doubled.
bool isFixedLengthVector() const
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
bool bitsGE(EVT VT) const
Return true if this has no less bits than VT.
bool is256BitVector() const
Return true if this is a 256-bit vector type.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
EVT changeElementType(LLVMContext &Context, EVT EltVT) const
Return a VT for a type whose attributes match ourselves with the exception of the element type that i...
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInConsecutiveRegs() const
Align getNonZeroOrigAlign() const
bool isSwiftError() const
unsigned getByValSize() const
bool isInConsecutiveRegsLast() const
bool isSwiftAsync() const
Align getNonZeroByValAlign() const
unsigned getBitWidth() const
Get the bit width of this value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
bool isBeforeLegalize() const
This structure is used to pass arguments to makeLibCall function.