#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"

cl::desc(
    "Verify that narrow int args are properly extended per the "

: Op0(Op0In), Op1(Op1In), Chain(ChainIn),
  Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
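// Note (added): this is the constructor of the local Comparison helper,
// which bundles the two compare operands, an optional chain for strict FP
// compares, and the condition-code fields (Opcode, ICmpType, CCValid,
// CCMask) that the adjust* routines below refine in place.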
if (Subtarget.hasHighWord())

if (Subtarget.hasVector()) {

if (Subtarget.hasVectorEnhancements1())

if (Subtarget.hasVector()) {

if (Subtarget.hasVector())

for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
     I <= MVT::LAST_FP_VALUETYPE;

for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
     I <= MVT::LAST_INTEGER_VALUETYPE;

if (Subtarget.hasPopulationCount())

(!Subtarget.hasFPExtension() && VT == MVT::i32) ? Promote : Custom;
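// Note (added, assumption): a plausible reading of this operation-action
// choice is that without the floating-point extension facility the i32
// conversion is Promoted to the i64 form available on all models, while
// newer targets can Custom-lower the 32-bit conversion directly.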
if (!Subtarget.hasVectorEnhancements3()) {

if (Subtarget.hasVectorEnhancements3()) {

{MVT::i8, MVT::i16, MVT::i32}, Legal);

{MVT::i8, MVT::i16}, Legal);

if (Subtarget.hasMiscellaneousExtensions4()) {

if (Subtarget.hasMiscellaneousExtensions3()) {

if (VT != MVT::v2i64 || Subtarget.hasVectorEnhancements3()) {

if (Subtarget.hasVectorEnhancements3() &&
    VT != MVT::v16i8 && VT != MVT::v8i16) {

if (Subtarget.hasVectorEnhancements1())

if (Subtarget.hasVector()) {

if (Subtarget.hasVectorEnhancements2()) {

for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {

for (auto Op : {ISD::LOAD, ISD::ATOMIC_LOAD, ISD::STORE, ISD::ATOMIC_STORE})

for (unsigned I = MVT::FIRST_FP_VALUETYPE;
     I <= MVT::LAST_FP_VALUETYPE;

if (Subtarget.hasFPExtension()) {

if (Subtarget.hasFPExtension()) {

if (Subtarget.hasVector()) {

if (Subtarget.hasVectorEnhancements1()) {

if (Subtarget.hasVectorEnhancements1()) {

for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                 MVT::v4f32, MVT::v2f64 }) {

if (!Subtarget.hasVectorEnhancements1()) {

if (Subtarget.hasVectorEnhancements1())

if (Subtarget.hasVectorEnhancements1()) {

if (!Subtarget.hasVector()) {

if (Subtarget.isTargetzOS()) {

return Subtarget.hasSoftFloat();

return Subtarget.hasVectorEnhancements1();
if (!Subtarget.hasVector() ||
    (isFP128 && !Subtarget.hasVectorEnhancements1()))

uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();

if (SplatBitSize > 64)

OpVals.push_back(((unsigned) SignedValue));

if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
  OpVals.push_back(Start - (64 - SplatBitSize));
  OpVals.push_back(End - (64 - SplatBitSize));

uint64_t SplatBitsZ = SplatBits.getZExtValue();
uint64_t SplatUndefZ = SplatUndef.getZExtValue();

return TryValue(SplatBitsZ | Middle);
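// Note (added): this decides whether a splat can be materialized by a
// single vector instruction: VGBM (per-byte 0x00/0xff mask), VREPI
// (replicated signed 16-bit immediate), or VGM (a contiguous run of set
// bits, hence the isRxSBGMask check).  TryValue is attempted with the
// undefined bits (SplatUndefZ) forced both ways to widen the match.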
assert(IntBits.getBitWidth() == 128 && "Unsupported APInt.");

unsigned HalfSize = Width / 2;

if (HighValue != LowValue || 8 > HalfSize)

SplatBits = HighValue;

SplatBitSize = Width;

BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,

BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,

bool ForCodeSize) const {

if (Imm.isZero() || Imm.isNegZero())

assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");

Register MainDstReg = MRI.createVirtualRegister(RC);
Register RestoreDstReg = MRI.createVirtualRegister(RC);

assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

const int64_t FPOffset = 0;

Register LabelReg = MRI.createVirtualRegister(PtrRC);

auto *SpecialRegs = Subtarget.getSpecialRegisters();
bool HasFP = Subtarget.getFrameLowering()->hasFP(*MF);
.addReg(SpecialRegs->getFramePointerRegister())

.addReg(SpecialRegs->getStackPointerRegister())

Register BCReg = MRI.createVirtualRegister(PtrRC);

.addReg(SpecialRegs->getStackPointerRegister())
.addImm(TFL->getBackchainOffset(*MF))

MIB = BuildMI(*ThisMBB, MI, DL, TII->get(SystemZ::EH_SjLj_Setup))

MIB.addRegMask(RegInfo->getNoPreservedMask());

MI.eraseFromParent();

assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

auto *SpecialRegs = Subtarget.getSpecialRegisters();

const int64_t FPOffset = 0;

SpecialRegs->getFramePointerRegister())

SpecialRegs->getStackPointerRegister())

.addReg(SpecialRegs->getStackPointerRegister())
.addImm(TFL->getBackchainOffset(*MF))

MI.eraseFromParent();
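// Note (added): the two routines above expand __builtin_setjmp and
// __builtin_longjmp at the machine level: setjmp stores FP, SP, the resume
// label, and (when back chains are in use) the backchain slot into the
// buffer, with EH_SjLj_Setup modelling the non-local edge; longjmp reloads
// the same slots and branches to the saved label.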
if (Subtarget.hasInterlockedAccess1() &&

EVT VT = Y.getValueType();

if (VT == MVT::i32 || VT == MVT::i64)
  return Subtarget.hasMiscellaneousExtensions3();

if (VT.isVector() || VT == MVT::i128)
  return Subtarget.hasVector();

bool MVC = Ty->isIntegerTy(8);
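// Note (added): i8 memory-to-memory operations are normally selected as MVC,
// an SS-format instruction that only takes a base register plus a 12-bit
// unsigned displacement (no index register), which is why the supported
// addressing mode is constrained when MVC is the expected selection.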
static AddressingMode

switch (II->getIntrinsicID()) {
case Intrinsic::memset:
case Intrinsic::memmove:
case Intrinsic::memcpy:

if (SingleUser->getParent() == I->getParent()) {

if (C->getBitWidth() <= 64 &&

if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())

I->getOperand(0)->getType());

bool IsVectorAccess = MemAccessTy->isVectorTy();

Value *DataOp = I->getOperand(0);

IsVectorAccess = true;

User *LoadUser = *I->user_begin();

IsVectorAccess = true;

if (IsFPAccess || IsVectorAccess)

Subtarget.hasVector() && (Ty->isVectorTy() || Ty->isIntegerTy(128));

return AM.Scale == 0;
LLVMContext &Context, std::vector<EVT> &MemOps, unsigned Limit,
const MemOp &Op, unsigned DstAS, unsigned SrcAS,
const AttributeList &FuncAttributes) const {

const int MVCFastLen = 16;

if (Limit != ~unsigned(0)) {
  if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
  if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
  if (Op.isZeroMemset())

DstAS, SrcAS, FuncAttributes);

const AttributeList &FuncAttributes) const {

return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
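// Note (added): the MVCFastLen tests keep short memcpy/memset calls from
// being scalarized, since one MVC covers them (a memset first stores the
// byte, then propagates it with an overlapping MVC, hence "size() - 1";
// zeroing memsets use XC instead).  getOptimalMemOpType then prefers v2i64
// vector copies when the vector facility is available.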
if (!FromType->isIntegerTy() || !ToType->isIntegerTy())

unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();

return FromBits > ToBits;

return FromBits > ToBits;

if (Constraint.size() == 1) {
  switch (Constraint[0]) {

} else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
  switch (Constraint[1]) {

if (StringRef("{@cc}").compare(Constraint) == 0)

Value *CallOperandVal = Info.CallOperandVal;

if (!CallOperandVal)

switch (*Constraint) {

if (Subtarget.hasVector())

if (C->getZExtValue() == 0x7fffffff)

static std::pair<unsigned, const TargetRegisterClass *>

const unsigned *Map, unsigned Size) {

assert(*(Constraint.end() - 1) == '}' && "Missing '}'");

if (isdigit(Constraint[2])) {

return std::make_pair(Map[Index], RC);

return std::make_pair(0U, nullptr);

std::pair<unsigned, const TargetRegisterClass *>

if (Constraint.size() == 1) {
  switch (Constraint[0]) {

return std::make_pair(0U, &SystemZ::GR64BitRegClass);

return std::make_pair(0U, &SystemZ::GR128BitRegClass);

return std::make_pair(0U, &SystemZ::GR32BitRegClass);

return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
else if (VT == MVT::i128)
  return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

return std::make_pair(0U, &SystemZ::FP16BitRegClass);

return std::make_pair(0U, &SystemZ::FP64BitRegClass);

return std::make_pair(0U, &SystemZ::FP128BitRegClass);

return std::make_pair(0U, &SystemZ::FP32BitRegClass);

if (Subtarget.hasVector()) {

return std::make_pair(0U, &SystemZ::VR16BitRegClass);

return std::make_pair(0U, &SystemZ::VR32BitRegClass);

return std::make_pair(0U, &SystemZ::VR64BitRegClass);

return std::make_pair(0U, &SystemZ::VR128BitRegClass);
auto getVTSizeInBits = [&VT]() {

if (Constraint[1] == 'r') {
  if (getVTSizeInBits() == 32)
  if (getVTSizeInBits() == 128)

if (Constraint[1] == 'f') {
  return std::make_pair(
  if (getVTSizeInBits() == 16)
  if (getVTSizeInBits() == 32)
  if (getVTSizeInBits() == 128)

if (Constraint[1] == 'v') {
  if (!Subtarget.hasVector())
    return std::make_pair(
  if (getVTSizeInBits() == 16)
  if (getVTSizeInBits() == 32)
  if (getVTSizeInBits() == 64)

if (Constraint[1] == '@') {
  if (StringRef("{@cc}").compare(Constraint) == 0)
    return std::make_pair(0u, &SystemZ::GR32BitRegClass);

.Case("r4", Subtarget.isTargetXPLINK64() ? SystemZ::R4D
                                         : SystemZ::NoRegister)

Subtarget.isTargetELF() ? SystemZ::R15D : SystemZ::NoRegister)

const Constant *PersonalityFn) const {
  return Subtarget.isTargetXPLINK64() ? SystemZ::R1D : SystemZ::R6D;

const Constant *PersonalityFn) const {
  return Subtarget.isTargetXPLINK64() ? SystemZ::R2D : SystemZ::R7D;

if (StringRef("{@cc}").compare(OpInfo.ConstraintCode) != 0)

if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
    OpInfo.ConstraintVT.getSizeInBits() < 8)

MRI.addLiveIn(SystemZ::CC);

if (Constraint.size() == 1) {
  switch (Constraint[0]) {

Op.getValueType()));

Op.getValueType()));

C->getSExtValue(), SDLoc(Op), Op.getValueType()));

C->getSExtValue(), SDLoc(Op), Op.getValueType()));

if (C->getZExtValue() == 0x7fffffff)

Op.getValueType()));

#include "SystemZGenCallingConv.inc"

static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,

Type *ToType) const {

if (BitCastToType == MVT::v2i64)

MVT::Untyped, Hi, Lo);
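// Note (added): i128 values are carried in even/odd 64-bit register pairs;
// the REG_SEQUENCE producing MVT::Untyped above packages the (Hi, Lo)
// halves into one pair-valued node, and the splitting/joining helpers that
// follow convert between that form and ordinary i128 SDValues.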
unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {

if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {

MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {

if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {

unsigned NumFixedGPRs = 0;
unsigned NumFixedFPRs = 0;
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

RC = &SystemZ::GR32BitRegClass;

RC = &SystemZ::GR64BitRegClass;

RC = &SystemZ::FP16BitRegClass;

RC = &SystemZ::FP32BitRegClass;

RC = &SystemZ::FP64BitRegClass;

RC = &SystemZ::FP128BitRegClass;

RC = &SystemZ::VR128BitRegClass;

if (Subtarget.isTargetXPLINK64()) {

ArgSPOffset += XPRegs.getCallFrameSize();

unsigned SlotOffs = VA.getLocVT() == MVT::f16 ? 6 : 4;

ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,

unsigned ArgIndex = Ins[I].OrigArgIndex;
assert(Ins[I].PartOffset == 0);
while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
  unsigned PartOffset = Ins[I + 1].PartOffset;

if (IsVarArg && Subtarget.isTargetXPLINK64()) {

Subtarget.getSpecialRegisters());

int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();

if (IsVarArg && Subtarget.isTargetELF()) {

int64_t RegSaveOffset =

&SystemZ::FP64BitRegClass);

if (Subtarget.isTargetXPLINK64()) {

Subtarget.getSpecialRegisters());

MRI.addLiveIn(Regs->getADARegister(), ADAvReg);

for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)

if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())

unsigned Offset, bool LoadAdr = false) {

bool LoadAddr = false;

unsigned ADADelta = 0;
unsigned EPADelta = 8;

bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                   G->getGlobal()->hasPrivateLinkage());

if (Subtarget.isTargetXPLINK64())

verifyNarrowIntegerArgs_Call(Outs, &MF.getFunction(), Callee);
CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx);

for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

unsigned ArgIndex = Outs[I].OrigArgIndex;

if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {

Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;

SlotVT = Outs[I].VT;

assert(Outs[I].PartOffset == 0);
while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
  SDValue PartValue = OutVals[I + 1];
  unsigned PartOffset = Outs[I + 1].PartOffset;

SlotVT.getStoreSize()) && "Not enough space for argument part!");

ArgValue = SpillSlot;

if (!StackPtr.getNode())

else if (VA.getLocVT() == MVT::f16)

if (Subtarget.isTargetXPLINK64() && VA.needsCustom()) {

RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));

if (!MemOpChains.empty())

if (Subtarget.isTargetXPLINK64()) {

->getAddressOfCalleeRegister();

Callee = DAG.getRegister(CalleeReg, Callee.getValueType());

} else if (IsTailCall) {

Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());

for (const auto &[Reg, N] : RegsToPass) {

Ops.push_back(Chain);
Ops.push_back(Callee);

for (const auto &[Reg, N] : RegsToPass)

const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
assert(Mask && "Missing call preserved mask for calling convention");

Ops.push_back(Glue);

CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);

VA.getLocVT(), Glue);
bool DoesNotReturn, bool IsReturnValueUsed) const {

Args.reserve(Ops.size());

Entry.IsZExt = !Entry.IsSExt;
Args.push_back(Entry);

.setCallee(CallConv, RetTy, Callee, std::move(Args))

const Type *RetTy) const {

for (auto &Out : Outs)
  if (Out.ArgVT == MVT::i128)

CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Context);
return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);

verifyNarrowIntegerArgs_Ret(Outs, &MF.getFunction());

if (RetLocs.empty())

for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {

unsigned &CCValid) {
unsigned Id = Op.getConstantOperandVal(1);
case Intrinsic::s390_tbegin:
case Intrinsic::s390_tbegin_nofloat:
case Intrinsic::s390_tend:

unsigned Id = Op.getConstantOperandVal(0);
case Intrinsic::s390_vpkshs:
case Intrinsic::s390_vpksfs:
case Intrinsic::s390_vpksgs:

case Intrinsic::s390_vpklshs:
case Intrinsic::s390_vpklsfs:
case Intrinsic::s390_vpklsgs:

case Intrinsic::s390_vceqbs:
case Intrinsic::s390_vceqhs:
case Intrinsic::s390_vceqfs:
case Intrinsic::s390_vceqgs:
case Intrinsic::s390_vceqqs:

case Intrinsic::s390_vchbs:
case Intrinsic::s390_vchhs:
case Intrinsic::s390_vchfs:
case Intrinsic::s390_vchgs:
case Intrinsic::s390_vchqs:

case Intrinsic::s390_vchlbs:
case Intrinsic::s390_vchlhs:
case Intrinsic::s390_vchlfs:
case Intrinsic::s390_vchlgs:
case Intrinsic::s390_vchlqs:

case Intrinsic::s390_vtm:

case Intrinsic::s390_vfaebs:
case Intrinsic::s390_vfaehs:
case Intrinsic::s390_vfaefs:

case Intrinsic::s390_vfaezbs:
case Intrinsic::s390_vfaezhs:
case Intrinsic::s390_vfaezfs:

case Intrinsic::s390_vfeebs:
case Intrinsic::s390_vfeehs:
case Intrinsic::s390_vfeefs:

case Intrinsic::s390_vfeezbs:
case Intrinsic::s390_vfeezhs:
case Intrinsic::s390_vfeezfs:

case Intrinsic::s390_vfenebs:
case Intrinsic::s390_vfenehs:
case Intrinsic::s390_vfenefs:

case Intrinsic::s390_vfenezbs:
case Intrinsic::s390_vfenezhs:
case Intrinsic::s390_vfenezfs:

case Intrinsic::s390_vistrbs:
case Intrinsic::s390_vistrhs:
case Intrinsic::s390_vistrfs:

case Intrinsic::s390_vstrcbs:
case Intrinsic::s390_vstrchs:
case Intrinsic::s390_vstrcfs:

case Intrinsic::s390_vstrczbs:
case Intrinsic::s390_vstrczhs:
case Intrinsic::s390_vstrczfs:

case Intrinsic::s390_vstrsb:
case Intrinsic::s390_vstrsh:
case Intrinsic::s390_vstrsf:

case Intrinsic::s390_vstrszb:
case Intrinsic::s390_vstrszh:
case Intrinsic::s390_vstrszf:

case Intrinsic::s390_vfcedbs:
case Intrinsic::s390_vfcesbs:

case Intrinsic::s390_vfchdbs:
case Intrinsic::s390_vfchsbs:

case Intrinsic::s390_vfchedbs:
case Intrinsic::s390_vfchesbs:

case Intrinsic::s390_vftcidb:
case Intrinsic::s390_vftcisb:

case Intrinsic::s390_tdc:

unsigned NumOps = Op.getNumOperands();
Ops.push_back(Op.getOperand(0));
Ops.push_back(Op.getOperand(I));
assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

unsigned NumOps = Op.getNumOperands();
assert((Op.getConstantOperandVal(0) == Intrinsic::s390_tdc && I == 1) &&
       "Unhandled intrinsic with f16 operand.");
Ops.push_back(CurrOper);

case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
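// Note (added): the machine condition code is two bits (CC 0-3) and branch
// masks are four-bit sets keyed by 1 << (3 - CC).  This macro maps ordered
// and unordered ISD predicates onto the corresponding CCMASK_CMP_* values,
// with CCMASK_CMP_UO standing for the unordered (CC 3) FP compare result.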
if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

int64_t Value = ConstOp1->getSExtValue();

if (!C.Op0.hasOneUse() ||
    C.Op0.getOpcode() != ISD::LOAD ||

unsigned NumBits = Load->getMemoryVT().getSizeInBits();
if ((NumBits != 8 && NumBits != 16) ||
    NumBits != Load->getMemoryVT().getStoreSizeInBits())

if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

uint64_t Mask = (1 << NumBits) - 1;

int64_t SignedValue = ConstOp1->getSExtValue();

} else if (NumBits == 8) {

if (C.Op0.getValueType() != MVT::i32 ||
    Load->getExtensionType() != ExtType) {

Load->getBasePtr(), Load->getPointerInfo(),
Load->getMemoryVT(), Load->getAlign(),
Load->getMemOperand()->getFlags());

if (C.Op1.getValueType() != MVT::i32 ||
    Value != ConstOp1->getZExtValue())

if (Load->getMemoryVT() == MVT::i8)

switch (Load->getExtensionType()) {

if (C.Op0.getValueType() == MVT::i128)
if (C.Op0.getValueType() == MVT::f128)

if (ConstOp1 && ConstOp1->getZExtValue() == 0)

unsigned Opcode0 = C.Op0.getOpcode();

C.Op0.getConstantOperandVal(1) == 0xffffffff)

((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {

if (C1 && C1->isZero()) {
  if (N->getOpcode() == ISD::FNEG) {

if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
if (C1 && C1->getZExtValue() == 32) {
  SDValue ShlOp0 = C.Op0.getOperand(0);

C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
C.Op1->getAsZExtVal() == 0) {
if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
    C.Op0.getValueSizeInBits().getFixedValue()) {
  unsigned Type = L->getExtensionType();
  C.Op0 = C.Op0.getOperand(0);

uint64_t Amount = Shift->getZExtValue();
if (Amount >= N.getValueSizeInBits())

unsigned ICmpType) {
assert(Mask != 0 && "ANDs with zero should have been removed by now");

if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
if (EffectivelyUnsigned && CmpVal < Low) {
if (CmpVal == Mask) {
if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {

if (C.Op0.getValueType() == MVT::i128) {
if (Mask && Mask->getAPIntValue() == 0) {
  C.Op1 = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, C.Op0.getOperand(1));
  C.Op0 = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, C.Op0.getOperand(0));

uint64_t CmpVal = ConstOp1->getZExtValue();

NewC.Op0 = C.Op0.getOperand(0);
NewC.Op1 = C.Op0.getOperand(1);

MaskVal = Mask->getZExtValue();

MaskVal = -(CmpVal & -CmpVal);

unsigned NewCCMask, ShiftVal;

(MaskVal >> ShiftVal != 0) &&
((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
MaskVal >> ShiftVal,
MaskVal >>= ShiftVal;

(MaskVal << ShiftVal != 0) &&
((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
MaskVal << ShiftVal,
MaskVal <<= ShiftVal;

if (Mask && Mask->getZExtValue() == MaskVal)

C.CCMask = NewCCMask;

if (C.Op0.getValueType() != MVT::i128)

Src = Src.getOperand(0);

unsigned Opcode = 0;
if (Src.hasOneUse()) {
  switch (Src.getOpcode()) {

C.Op0 = Src->getOperand(0);
C.Op1 = Src->getOperand(1);

C.CCMask ^= C.CCValid;

C.Op0 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, C.Op0);
C.Op1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, C.Op1);

bool Swap = false, Invert = false;

C.CCMask ^= C.CCValid;

if (!Mask || Mask->getValueSizeInBits(0) > 64)

if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())

C.Op0 = C.Op0.getOperand(0);

C.CCValid = CCValid;
C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
C.CCMask &= CCValid;
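// Note (added): these expressions turn an instruction's documented CC result
// into a branch mask, e.g. CC == 0 gives 1 << 3 == 8 for an equality-style
// test; the "CC < 4 ? ... : 0 / -1" guards make an out-of-range CC request
// degenerate to never/always rather than an invalid mask.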
bool IsSignaling = false) {

unsigned Opcode, CCValid;

Comparison C(CmpOp0, CmpOp1, Chain);

if (C.Op0.getValueType().isFloatingPoint()) {

else if (!IsSignaling)

if (!C.Op1.getNode()) {
  switch (C.Op0.getOpcode()) {

EVT IntVT = C.Op0.getValueType().changeVectorElementTypeToInteger();

return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);

return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);

Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);

unsigned CCValid, unsigned CCMask) {

int Mask[] = { Start, -1, Start + 1, -1 };

!Subtarget.hasVectorEnhancements1()) {

SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::Other);

SDVTList VTs = DAG.getVTList(VT, MVT::Other);
return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);

return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);

bool IsSignaling) const {

assert(!IsSignaling || Chain);

bool Invert = false;

assert(IsFP && "Unexpected integer comparison");

DL, VT, CmpOp1, CmpOp0, Chain);
DL, VT, CmpOp0, CmpOp1, Chain);

LT.getValue(1), GE.getValue(1));

assert(IsFP && "Unexpected integer comparison");

DL, VT, CmpOp1, CmpOp0, Chain);
DL, VT, CmpOp0, CmpOp1, Chain);

LT.getValue(1), GT.getValue(1));

Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);

Chain = Cmp.getValue(1);

if (Chain && Chain.getNode() != Cmp.getNode()) {

EVT VT = Op.getValueType();

return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

bool IsSignaling) const {

EVT VT = Op.getNode()->getValueType(0);

SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                               Chain, IsSignaling);

C.CCMask ^= C.CCValid;

C.Op1->getAsZExtVal() == 0) {
if (Subtarget.hasVectorEnhancements3() &&
    C.Op0.getValueType() == MVT::i128 &&

const GlobalValue *GV = Node->getGlobal();

if (Subtarget.isPC32DBLSymbol(GV, CM)) {

uint64_t Anchor = Offset & ~uint64_t(0xfff);

} else if (Subtarget.isTargetELF()) {

} else if (Subtarget.isTargetzOS()) {

Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);

Ops.push_back(Chain);

Node->getValueType(0),

const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *Mask =
assert(Mask && "Missing call preserved mask for calling convention");

Ops.push_back(Glue);

SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,

const GlobalValue *GV = Node->getGlobal();

SDValue TP = lowerThreadPointer(DL, DAG);

SystemZConstantPoolValue *CPV =

SystemZConstantPoolValue *CPV =

SystemZMachineFunctionInfo *MFI =

SystemZConstantPoolValue *CPV =

if (CP->isMachineConstantPoolEntry())

auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();

MachineFrameInfo &MFI = MF.getFrameInfo();

unsigned Depth = Op.getConstantOperandVal(0);

int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);

if (!MF.getSubtarget<SystemZSubtarget>().hasBackChain())

MachinePointerInfo());

unsigned Depth = Op.getConstantOperandVal(0);

if (!MF.getSubtarget<SystemZSubtarget>().hasBackChain())

SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
const auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
int Offset = TFL->getReturnAddressOffset(MF);

MachinePointerInfo());

SystemZCallingConventionRegisters *CCR = Subtarget.getSpecialRegisters();

&SystemZ::GR64BitRegClass);

EVT InVT = In.getValueType();
EVT ResVT = Op.getValueType();

LoadN->getBasePtr(), LoadN->getMemOperand());

if (InVT == MVT::i32 && ResVT == MVT::f32) {
  if (Subtarget.hasHighWord()) {

MVT::i64, SDValue(U64, 0), In);

DL, MVT::f32, Out64);

if (InVT == MVT::f32 && ResVT == MVT::i32) {

MVT::f64, SDValue(U64, 0), In);

if (Subtarget.hasHighWord())

if (Subtarget.isTargetXPLINK64())
  return lowerVASTART_XPLINK(Op, DAG);
return lowerVASTART_ELF(Op, DAG);

SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();

MachinePointerInfo(SV));

SystemZMachineFunctionInfo *FuncInfo =
    MF.getInfo<SystemZMachineFunctionInfo>();

const unsigned NumFields = 4;

for (unsigned I = 0; I < NumFields; ++I) {

MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
                         MachinePointerInfo(SV, Offset));

Align(8), false, false,
nullptr, std::nullopt, MachinePointerInfo(DstSV),
MachinePointerInfo(SrcSV));
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,

if (Subtarget.isTargetXPLINK64())
  return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,

const TargetFrameLowering *TFI = Subtarget.getFrameLowering();

uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

if (ExtraAlignSpace)

bool IsSigned = false;
bool DoesNotReturn = false;
bool IsReturnValueUsed = false;
EVT VT = Op.getValueType();

auto &Regs = Subtarget.getSpecialRegisters<SystemZXPLINK64Registers>();

if (ExtraAlignSpace) {

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,

const TargetFrameLowering *TFI = Subtarget.getFrameLowering();

bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain();

uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
                        MachinePointerInfo());

if (ExtraAlignSpace)

DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);

if (RequiredAlign > StackAlign) {

Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
                     MachinePointerInfo());

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(

unsigned Opcode) const {

EVT VT = Op.getValueType();

assert(Subtarget.hasMiscellaneousExtensions2());

Op.getOperand(0), Op.getOperand(1), Even, Odd);

EVT VT = Op.getValueType();

else if (Subtarget.hasMiscellaneousExtensions2())

Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

EVT VT = Op.getValueType();

Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

EVT VT = Op.getValueType();

EVT VT = Op.getValueType();

Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)

MVT::i64, HighOp, Low32);

SDNode *N = Op.getNode();

if (N->getValueType(0) == MVT::i128) {
  unsigned BaseOp = 0;
  unsigned FlagOp = 0;
  bool IsBorrow = false;
  switch (Op.getOpcode()) {

unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;

switch (Op.getOpcode()) {

SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);

if (N->getValueType(1) == MVT::i1)

SDNode *N = Op.getNode();
MVT VT = N->getSimpleValueType(0);

if (VT == MVT::i128) {
  unsigned BaseOp = 0;
  unsigned FlagOp = 0;
  bool IsBorrow = false;
  switch (Op.getOpcode()) {

unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;

switch (Op.getOpcode()) {

SDVTList VTs = DAG.getVTList(VT, MVT::i32);

if (N->getValueType(1) == MVT::i1)

EVT VT = Op.getValueType();

Op = Op.getOperand(0);

if (NumSignificantBits == 0)

BitSize = std::min(BitSize, OrigBitSize);

for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {

if (BitSize != OrigBitSize)

return DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));

EVT RegVT = Op.getValueType();

return lowerATOMIC_LDST_I128(Op, DAG);
return lowerLoadF16(Op, DAG);

if (Node->getMemoryVT().getSizeInBits() == 128)
  return lowerATOMIC_LDST_I128(Op, DAG);
return lowerStoreF16(Op, DAG);

(Node->getMemoryVT() == MVT::i128 || Node->getMemoryVT() == MVT::f128) &&
"Only custom lowering i128 or f128.");

EVT WideVT = MVT::i32;

unsigned Opcode) const {

EVT NarrowVT = Node->getMemoryVT();
EVT WideVT = MVT::i32;
if (NarrowVT == WideVT)

MachineMemOperand *MMO = Node->getMemOperand();
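// Note (added): 8- and 16-bit atomic read-modify-write ops are widened to a
// compare-and-swap loop on the containing aligned 32-bit word: AlignedAddr
// selects that word, and the BitShift/NegBitShift amounts rotate the narrow
// operand into and back out of its position within the word.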
SDValue AlignedAddr, BitShift, NegBitShift;

SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,

EVT MemVT = Node->getMemoryVT();
if (MemVT == MVT::i32 || MemVT == MVT::i64) {

assert(Op.getValueType() == MemVT && "Mismatched VTs");
assert(Subtarget.hasInterlockedAccess1() &&
       "Should have been expanded by AtomicExpand pass.");

return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
                     Node->getChain(), Node->getBasePtr(), NegSrc2,
                     Node->getMemOperand());

MachineMemOperand *MMO = Node->getMemOperand();

if (Node->getMemoryVT() == MVT::i128) {

EVT NarrowVT = Node->getMemoryVT();
EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
if (NarrowVT == WideVT) {
  SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
  SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };

DL, Tys, Ops, NarrowVT, MMO);

SDValue AlignedAddr, BitShift, NegBitShift;

SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,

VTList, Ops, NarrowVT, MMO);

SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {

auto *Regs = Subtarget.getSpecialRegisters();

"in GHC calling convention");

Regs->getStackPointerRegister(), Op.getValueType());

auto *Regs = Subtarget.getSpecialRegisters();
bool StoreBackchain = MF.getSubtarget<SystemZSubtarget>().hasBackChain();

"in GHC calling convention");

if (StoreBackchain) {

Chain, DL, Regs->getStackPointerRegister(), MVT::i64);

Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
                        MachinePointerInfo());

Chain = DAG.getCopyToReg(Chain, DL, Regs->getStackPointerRegister(), NewSP);

Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
                     MachinePointerInfo());

bool IsData = Op.getConstantOperandVal(4);

return Op.getOperand(0);

bool IsWrite = Op.getConstantOperandVal(2);

Node->getMemoryVT(), Node->getMemOperand());

SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,

unsigned Opcode, CCValid;

assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,

unsigned Opcode, CCValid;

if (Op->getNumValues() == 1)

assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");

unsigned Id = Op.getConstantOperandVal(0);

case Intrinsic::thread_pointer:
  return lowerThreadPointer(SDLoc(Op), DAG);

case Intrinsic::s390_vpdi:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vperm:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

case Intrinsic::s390_vuphb:
case Intrinsic::s390_vuphh:
case Intrinsic::s390_vuphf:
case Intrinsic::s390_vuphg:

case Intrinsic::s390_vuplhb:
case Intrinsic::s390_vuplhh:
case Intrinsic::s390_vuplhf:
case Intrinsic::s390_vuplhg:

case Intrinsic::s390_vuplb:
case Intrinsic::s390_vuplhw:
case Intrinsic::s390_vuplf:
case Intrinsic::s390_vuplg:

case Intrinsic::s390_vupllb:
case Intrinsic::s390_vupllh:
case Intrinsic::s390_vupllf:
case Intrinsic::s390_vupllg:

case Intrinsic::s390_vsumb:
case Intrinsic::s390_vsumh:
case Intrinsic::s390_vsumgh:
case Intrinsic::s390_vsumgf:
case Intrinsic::s390_vsumqf:
case Intrinsic::s390_vsumqg:
  Op.getOperand(1), Op.getOperand(2));

case Intrinsic::s390_vaq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vaccb:
case Intrinsic::s390_vacch:
case Intrinsic::s390_vaccf:
case Intrinsic::s390_vaccg:
case Intrinsic::s390_vaccq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vacq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vacccq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

case Intrinsic::s390_vsq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vscbib:
case Intrinsic::s390_vscbih:
case Intrinsic::s390_vscbif:
case Intrinsic::s390_vscbig:
case Intrinsic::s390_vscbiq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vsbiq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vsbcbiq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

case Intrinsic::s390_vmhb:
case Intrinsic::s390_vmhh:
case Intrinsic::s390_vmhf:
case Intrinsic::s390_vmhg:
case Intrinsic::s390_vmhq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vmlhb:
case Intrinsic::s390_vmlhh:
case Intrinsic::s390_vmlhf:
case Intrinsic::s390_vmlhg:
case Intrinsic::s390_vmlhq:
  Op.getOperand(1), Op.getOperand(2));

case Intrinsic::s390_vmahb:
case Intrinsic::s390_vmahh:
case Intrinsic::s390_vmahf:
case Intrinsic::s390_vmahg:
case Intrinsic::s390_vmahq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vmalhb:
case Intrinsic::s390_vmalhh:
case Intrinsic::s390_vmalhf:
case Intrinsic::s390_vmalhg:
case Intrinsic::s390_vmalhq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

case Intrinsic::s390_vmeb:
case Intrinsic::s390_vmeh:
case Intrinsic::s390_vmef:
case Intrinsic::s390_vmeg:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vmleb:
case Intrinsic::s390_vmleh:
case Intrinsic::s390_vmlef:
case Intrinsic::s390_vmleg:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vmob:
case Intrinsic::s390_vmoh:
case Intrinsic::s390_vmof:
case Intrinsic::s390_vmog:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vmlob:
case Intrinsic::s390_vmloh:
case Intrinsic::s390_vmlof:
case Intrinsic::s390_vmlog:
  Op.getOperand(1), Op.getOperand(2));

case Intrinsic::s390_vmaeb:
case Intrinsic::s390_vmaeh:
case Intrinsic::s390_vmaef:
case Intrinsic::s390_vmaeg:
  Op.getOperand(1), Op.getOperand(2)),
case Intrinsic::s390_vmaleb:
case Intrinsic::s390_vmaleh:
case Intrinsic::s390_vmalef:
case Intrinsic::s390_vmaleg:
  Op.getOperand(1), Op.getOperand(2)),
case Intrinsic::s390_vmaob:
case Intrinsic::s390_vmaoh:
case Intrinsic::s390_vmaof:
case Intrinsic::s390_vmaog:
  Op.getOperand(1), Op.getOperand(2)),
case Intrinsic::s390_vmalob:
case Intrinsic::s390_vmaloh:
case Intrinsic::s390_vmalof:
case Intrinsic::s390_vmalog:
  Op.getOperand(1), Op.getOperand(2)),

{ 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
{ 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
{ 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
{ 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
{ 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
{ 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
{ 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
{ 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
{ 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
{ 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
{ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
{ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
{ 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
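// Note (added): in these byte-selector tables indices 0-15 refer to bytes of
// the first source vector and 16-31 to the second, the same convention VPERM
// uses; each row is the canonical pattern of one merge/pack/unpack form, so a
// matching shuffle can be selected as that single instruction instead of a
// general VPERM with a loaded mask.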
OpNo0 = OpNo1 = OpNos[1];
} else if (OpNos[1] < 0) {
  OpNo0 = OpNo1 = OpNos[0];

unsigned &OpNo0, unsigned &OpNo1) {
int OpNos[] = { -1, -1 };

if (OpNos[ModelOpNo] == 1 - RealOpNo)
OpNos[ModelOpNo] = RealOpNo;

unsigned &OpNo0, unsigned &OpNo1) {

int Elt = Bytes[From];

Transform[From] = -1;

while (P.Bytes[To] != Elt) {

Transform[From] = To;

Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I) {
  int Index = VSN->getMaskElt(I);
  for (unsigned J = 0; J < BytesPerElement; ++J)
    Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;

Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I)
  for (unsigned J = 0; J < BytesPerElement; ++J)
    Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;

unsigned BytesPerElement, int &Base) {

for (unsigned I = 0; I < BytesPerElement; ++I) {
  if (Bytes[Start + I] >= 0) {
    unsigned Elem = Bytes[Start + I];

if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())

} else if (unsigned(Base) != Elem - I)

unsigned &StartIndex, unsigned &OpNo0,

int OpNos[] = { -1, -1 };

for (unsigned I = 0; I < 16; ++I) {
  int Index = Bytes[I];

Shift = ExpectedShift;
else if (Shift != ExpectedShift)

if (OpNos[ModelOpNo] == 1 - RealOpNo)
OpNos[ModelOpNo] = RealOpNo;

Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);

if (N->getOpcode() == ISD::BITCAST)
  N = N->getOperand(0);

return Op->getZExtValue() == 0;

for (unsigned I = 0; I < Num; I++)

for (unsigned I = 0; I < 2; ++I)

unsigned StartIndex, OpNo0, OpNo1;

if (ZeroVecIdx != UINT32_MAX) {
  bool MaskFirst = true;

if (OpNo == ZeroVecIdx && I == 0) {

if (OpNo != ZeroVecIdx && Byte == 0) {

if (ZeroIdx != -1) {

if (Bytes[I] >= 0) {

if (OpNo == ZeroVecIdx)

struct GeneralShuffle {
  GeneralShuffle(EVT vt)
      : VT(vt), UnpackFromEltSize(UINT_MAX), UnpackLow(false) {}

  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }

  unsigned UnpackFromEltSize;

void GeneralShuffle::addUndef() {
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
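// Note (added): GeneralShuffle describes an arbitrary shuffle as a flat
// Bytes vector of selectors, where an entry holds OpNo * SystemZ::VectorBytes
// plus the byte offset within that operand, or -1 for don't-care; this is
// exactly the operand form VPERM consumes.  addUndef just appends -1 for
// each byte of an undefined element.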
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {

EVT FromVT = Op.getNode() ? Op.getValueType() : VT;

if (FromBytesPerElement < BytesPerElement)

(FromBytesPerElement - BytesPerElement));

while (Op.getNode()) {
  if (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);

} else if (Op.isUndef()) {

for (; OpNo < Ops.size(); ++OpNo)
  if (Ops[OpNo] == Op)
if (OpNo == Ops.size())

for (unsigned I = 0; I < BytesPerElement; ++I)
  Bytes.push_back(Base + I);

if (Ops.size() == 0)

tryPrepareForUnpack();

if (Ops.size() == 1)

unsigned Stride = 1;
for (; Stride * 2 < Ops.size(); Stride *= 2) {
  for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {

else if (OpNo == I + Stride)

if (NewBytes[J] >= 0) {
  "Invalid double permute");
assert(NewBytesMap[J] < 0 && "Invalid double permute");

if (NewBytes[J] >= 0)

unsigned OpNo0, OpNo1;

else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))

Op = insertUnpackIfPrepared(DAG, DL, Op);

dbgs() << Msg.c_str() << " { ";
for (unsigned I = 0; I < Bytes.size(); I++)
  dbgs() << Bytes[I] << " ";

void GeneralShuffle::tryPrepareForUnpack() {

if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)

if (Ops.size() > 2 &&

UnpackFromEltSize = 1;
for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
  bool MatchUnpack = true;

unsigned ToEltSize = UnpackFromEltSize * 2;
bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;

if (Bytes[Elt] != -1) {
  if (IsZextByte != (OpNo == ZeroVecOpNo)) {
    MatchUnpack = false;

if (Ops.size() == 2) {
  bool CanUseUnpackLow = true, CanUseUnpackHigh = true;
  if (SrcBytes[i] == -1)
  if (SrcBytes[i] % 16 != int(i))
    CanUseUnpackHigh = false;
  CanUseUnpackLow = false;
  if (!CanUseUnpackLow && !CanUseUnpackHigh) {
    UnpackFromEltSize = UINT_MAX;
  if (!CanUseUnpackHigh)

if (UnpackFromEltSize > 4)

LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
                  << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
           dumpBytes(Bytes, "Original Bytes vector:"););
Elt += UnpackFromEltSize;
for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
  Bytes[B] = Bytes[Elt];

Ops.erase(&Ops[ZeroVecOpNo]);

if (Bytes[I] >= 0) {
  if (OpNo > ZeroVecOpNo)

if (!unpackWasPrepared())

unsigned InBits = UnpackFromEltSize * 8;

unsigned OutBits = InBits * 2;

DL, OutVT, PackedOp);

for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
  if (!Op.getOperand(I).isUndef())

if (Value.isUndef())

GeneralShuffle GS(VT);

bool FoundOne = false;
for (unsigned I = 0; I < NumElements; ++I) {

Op = Op.getOperand(0);

unsigned Elem = Op.getConstantOperandVal(1);
if (!GS.add(Op.getOperand(0), Elem))

} else if (Op.isUndef()) {

if (!ResidueOps.empty()) {
  while (ResidueOps.size() < NumElements)

for (auto &Op : GS.Ops) {
  if (!Op.getNode()) {

return GS.getNode(DAG, SDLoc(BVN));

bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {

if (AL->getOpcode() == ISD::ATOMIC_LOAD)

unsigned int NumElements = Elems.size();
unsigned int Count = 0;
for (auto Elem : Elems) {
  if (!Elem.isUndef()) {

else if (Elem != Single) {

if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))

bool AllLoads = true;
for (auto Elem : Elems)
  if (!isVectorElementLoad(Elem)) {

if (VT == MVT::v2i64 && !AllLoads)

if (VT == MVT::v2f64 && !AllLoads)

if (VT == MVT::v4f32 && !AllLoads) {

Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);

DL, MVT::v2i64, Op01, Op23);

unsigned NumConstants = 0;
for (unsigned I = 0; I < NumElements; ++I) {

if (NumConstants > 0) {
  for (unsigned I = 0; I < NumElements; ++I)

std::map<const SDNode *, unsigned> UseCounts;
SDNode *LoadMaxUses = nullptr;
for (unsigned I = 0; I < NumElements; ++I)
  if (isVectorElementLoad(Elems[I])) {
    SDNode *Ld = Elems[I].getNode();
    unsigned Count = ++UseCounts[Ld];
    if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < Count)

if (LoadMaxUses != nullptr) {
  ReplicatedVal = SDValue(LoadMaxUses, 0);

unsigned I1 = NumElements / 2 - 1;
unsigned I2 = NumElements - 1;
bool Def1 = !Elems[I1].isUndef();
bool Def2 = !Elems[I2].isUndef();

for (unsigned I = 0; I < NumElements; ++I)
  if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
EVT VT = Op.getValueType();

if (BVN->isConstant()) {
  if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))

for (unsigned I = 0; I < NumElements; ++I)

return buildVector(DAG, DL, VT, Ops);

EVT VT = Op.getValueType();

if (VSN->isSplat()) {
  unsigned Index = VSN->getSplatIndex();

"Splat index should be defined and in first operand");

GeneralShuffle GS(VT);
for (unsigned I = 0; I < NumElements; ++I) {
  int Elt = VSN->getMaskElt(I);

else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                 unsigned(Elt) % NumElements))

return GS.getNode(DAG, SDLoc(VSN));

EVT VT = Op.getValueType();

if (VT == MVT::v2f64 &&

DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
return DAG.getNode(ISD::BITCAST, DL, VT, Res);

SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,

EVT VT = Op.getValueType();

uint64_t Index = CIndexN->getZExtValue();

DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
return DAG.getNode(ISD::BITCAST, DL, VT, Res);

SDValue SystemZTargetLowering::

EVT OutVT = Op.getValueType();

unsigned StartOffset = 0;

ArrayRef<int> ShuffleMask = SVN->getMask();

if (ToBits == 64 && OutNumElts == 2) {
  int NumElem = ToBits / FromBits;
  if (ShuffleMask[0] == NumElem - 1 && ShuffleMask[1] == 2 * NumElem - 1)

int StartOffsetCandidate = -1;
for (int Elt = 0; Elt < OutNumElts; Elt++) {
  if (ShuffleMask[Elt] == -1)
  if (ShuffleMask[Elt] % OutNumElts == Elt) {
    if (StartOffsetCandidate == -1)
      StartOffsetCandidate = ShuffleMask[Elt] - Elt;
    if (StartOffsetCandidate == ShuffleMask[Elt] - Elt)
  StartOffsetCandidate = -1;

if (StartOffsetCandidate != -1) {
  StartOffset = StartOffsetCandidate;

if (StartOffset >= OutNumElts) {
  StartOffset -= OutNumElts;

PackedOp = DAG.getNode(Opcode, SDLoc(PackedOp), OutVT, PackedOp);
} while (FromBits != ToBits);

SDValue SystemZTargetLowering::

EVT OutVT = Op.getValueType();

unsigned NumInPerOut = InNumElts / OutNumElts;

SmallVector<int, 16> Mask(InNumElts);
unsigned ZeroVecElt = InNumElts;
for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
  unsigned MaskElt = PackedElt * NumInPerOut;
  unsigned End = MaskElt + NumInPerOut - 1;
  for (; MaskElt < End; MaskElt++)
    Mask[MaskElt] = ZeroVecElt++;
  Mask[MaskElt] = PackedElt;

return DAG.getNode(ISD::BITCAST, DL, OutVT, Shuf);

unsigned ByScalar) const {

EVT VT = Op.getValueType();

APInt SplatBits, SplatUndef;
unsigned SplatBitSize;

if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                         ElemBitSize, true) &&
    SplatBitSize == ElemBitSize) {

return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

BitVector UndefElements;

return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

if (VSN->isSplat()) {
  SDValue VSNOp0 = VSN->getOperand(0);
  unsigned Index = VSN->getSplatIndex();

"Splat index should be defined and in first operand");

return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

uint64_t ShiftAmt = ShiftAmtNode->getZExtValue() & 127;
if ((ShiftAmt & 7) == 0 || Subtarget.hasVectorEnhancements2()) {

if (ShiftAmt > 120) {

SmallVector<int, 16> Mask(16);
for (unsigned Elt = 0; Elt < 16; Elt++)
  Mask[Elt] = (ShiftAmt >> 3) + Elt;

if ((ShiftAmt & 7) == 0)

uint64_t ShiftAmt = ShiftAmtNode->getZExtValue() & 127;
if ((ShiftAmt & 7) == 0 || Subtarget.hasVectorEnhancements2()) {

if (ShiftAmt > 120) {

SmallVector<int, 16> Mask(16);
for (unsigned Elt = 0; Elt < 16; Elt++)
  Mask[Elt] = 16 - (ShiftAmt >> 3) + Elt;
MVT DstVT = Op.getSimpleValueType();

unsigned SrcAS = N->getSrcAddressSpace();

assert(SrcAS != N->getDestAddressSpace() &&
       "addrspacecast must be between different address spaces");

} else if (DstVT == MVT::i32) {

if (In.getSimpleValueType() != MVT::f16)

SDValue Chain, bool IsStrict) const {

assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");

std::tie(Result, Chain) =

bool IsStrict = Op->isStrictFPOpcode();

MVT VT = Op.getSimpleValueType();
SDValue InOp = Op.getOperand(IsStrict ? 1 : 0);

if (!Subtarget.hasFPExtension() && !IsSigned)

if (VT == MVT::i128) {

return useLibCall(DAG, LC, VT, InOp, DL, Chain, IsStrict);

bool IsStrict = Op->isStrictFPOpcode();

MVT VT = Op.getSimpleValueType();
SDValue InOp = Op.getOperand(IsStrict ? 1 : 0);

if (VT == MVT::f16) {

if (!Subtarget.hasFPExtension() && !IsSigned)

if (InVT == MVT::i128) {

return useLibCall(DAG, LC, VT, InOp, DL, Chain, IsStrict);
assert(Op.getSimpleValueType() == MVT::i64 &&
       "Expected to convert i64 to f16.");

assert(Op.getSimpleValueType() == MVT::f16 &&
       "Expected to convert f16 to i64.");
EVT RegVT = Op.getValueType();
assert(RegVT == MVT::f16 && "Expected to lower an f16 load.");

assert(EVT(RegVT) == AtomicLd->getMemoryVT() && "Unhandled f16 load");

AtomicLd->getChain(), AtomicLd->getBasePtr(),
AtomicLd->getMemOperand());

return DAG.getAtomic(ISD::ATOMIC_STORE, DL, MVT::i16, AtomicSt->getChain(),
                     Shft, AtomicSt->getBasePtr(),
                     AtomicSt->getMemOperand());

MVT ResultVT = Op.getSimpleValueType();

unsigned Check = Op.getConstantOperandVal(1);

unsigned TDCMask = 0;

MachinePointerInfo MPI =

return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);

switch (Op.getOpcode()) {
return lowerFRAMEADDR(Op, DAG);

return lowerRETURNADDR(Op, DAG);

return lowerBR_CC(Op, DAG);

return lowerSELECT_CC(Op, DAG);

return lowerSETCC(Op, DAG);

return lowerSTRICT_FSETCC(Op, DAG, false);

return lowerSTRICT_FSETCC(Op, DAG, true);

return lowerBITCAST(Op, DAG);

return lowerVASTART(Op, DAG);

return lowerVACOPY(Op, DAG);

case ISD::DYNAMIC_STACKALLOC:
  return lowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::GET_DYNAMIC_AREA_OFFSET:
  return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);

return lowerSMUL_LOHI(Op, DAG);

return lowerUMUL_LOHI(Op, DAG);

return lowerSDIVREM(Op, DAG);

return lowerUDIVREM(Op, DAG);

return lowerXALUO(Op, DAG);

return lowerUADDSUBO_CARRY(Op, DAG);

return lowerOR(Op, DAG);

return lowerCTPOP(Op, DAG);

case ISD::VECREDUCE_ADD:
  return lowerVECREDUCE_ADD(Op, DAG);
case ISD::ATOMIC_FENCE:
  return lowerATOMIC_FENCE(Op, DAG);
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_STORE:
  return lowerATOMIC_STORE(Op, DAG);
case ISD::ATOMIC_LOAD:
  return lowerATOMIC_LOAD(Op, DAG);
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
  return lowerATOMIC_LOAD_SUB(Op, DAG);
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
  return lowerATOMIC_CMP_SWAP(Op, DAG);
case ISD::STACKSAVE:
  return lowerSTACKSAVE(Op, DAG);
case ISD::STACKRESTORE:
  return lowerSTACKRESTORE(Op, DAG);

return lowerPREFETCH(Op, DAG);

return lowerINTRINSIC_W_CHAIN(Op, DAG);

return lowerINTRINSIC_WO_CHAIN(Op, DAG);

return lowerBUILD_VECTOR(Op, DAG);

return lowerVECTOR_SHUFFLE(Op, DAG);

return lowerSCALAR_TO_VECTOR(Op, DAG);

return lowerINSERT_VECTOR_ELT(Op, DAG);

return lowerEXTRACT_VECTOR_ELT(Op, DAG);

return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);

return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);

case ISD::ADDRSPACECAST:

return lowerFSHL(Op, DAG);

return lowerFSHR(Op, DAG);

case ISD::FP_EXTEND:
  return lowerFP_EXTEND(Op, DAG);

return lower_FP_TO_INT(Op, DAG);

return lower_INT_TO_FP(Op, DAG);

return lowerLoadF16(Op, DAG);

return lowerStoreF16(Op, DAG);

return lowerIS_FPCLASS(Op, DAG);

return lowerGET_ROUNDING(Op, DAG);

case ISD::READCYCLECOUNTER:
  return lowerREADCYCLECOUNTER(Op, DAG);
&SystemZ::FP128BitRegClass);

SystemZ::REG_SEQUENCE, SL, MVT::f128,

&SystemZ::FP128BitRegClass);

switch (N->getOpcode()) {
case ISD::ATOMIC_LOAD: {

SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };

DL, Tys, Ops, MVT::i128, MMO);

if (N->getValueType(0) == MVT::f128)

case ISD::ATOMIC_STORE: {

SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2) };

DL, Tys, Ops, MVT::i128, MMO);

MVT::Other, Res), 0);

case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {

DL, Tys, Ops, MVT::i128, MMO);

case ISD::BITCAST: {

EVT SrcVT = Src.getValueType();
EVT ResVT = N->getValueType(0);
if (ResVT == MVT::i128 && SrcVT == MVT::f128)
else if (SrcVT == MVT::i16 && ResVT == MVT::f16) {
  if (Subtarget.hasVector()) {
} else if (SrcVT == MVT::f16 && ResVT == MVT::i16) {
  Subtarget.hasVector()

bool IsStrict = N->isStrictFPOpcode();

SDValue InOp = N->getOperand(IsStrict ? 1 : 0);

EVT ResVT = N->getValueType(0);

if (ResVT == MVT::f16) {

bool IsStrict = N->isStrictFPOpcode();

EVT ResVT = N->getValueType(0);

SDValue InOp = N->getOperand(IsStrict ? 1 : 0);

if (InVT == MVT::f16) {

std::tie(InF32, Chain) =

#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME

OPCODE(ATOMIC_LOADW_ADD);
OPCODE(ATOMIC_LOADW_SUB);
OPCODE(ATOMIC_LOADW_AND);
OPCODE(ATOMIC_LOADW_XOR);
OPCODE(ATOMIC_LOADW_NAND);
OPCODE(ATOMIC_LOADW_MIN);
OPCODE(ATOMIC_LOADW_MAX);
OPCODE(ATOMIC_LOADW_UMIN);
OPCODE(ATOMIC_LOADW_UMAX);
OPCODE(ATOMIC_CMP_SWAPW);
OPCODE(ATOMIC_STORE_128);
OPCODE(ATOMIC_CMP_SWAP_128);
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())

DAGCombinerInfo &DCI,

unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::BITCAST)
  Op = Op.getOperand(0);

canTreatAsByteVector(Op.getValueType())) {

BytesPerElement, First))

if (Byte % BytesPerElement != 0)

Index = Byte / BytesPerElement;

canTreatAsByteVector(Op.getValueType())) {

EVT OpVT = Op.getValueType();

if (OpBytesPerElement < BytesPerElement)

unsigned End = (Index + 1) * BytesPerElement;
if (End % OpBytesPerElement != 0)

Op = Op.getOperand(End / OpBytesPerElement - 1);
if (!Op.getValueType().isInteger()) {

DCI.AddToWorklist(Op.getNode());

DCI.AddToWorklist(Op.getNode());

canTreatAsByteVector(Op.getValueType()) &&
canTreatAsByteVector(Op.getOperand(0).getValueType())) {

EVT ExtVT = Op.getValueType();
EVT OpVT = Op.getOperand(0).getValueType();

unsigned Byte = Index * BytesPerElement;
unsigned SubByte = Byte % ExtBytesPerElement;
unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
if (SubByte < MinSubByte ||
    SubByte + BytesPerElement > ExtBytesPerElement)

Byte = Byte / ExtBytesPerElement * OpBytesPerElement;

Byte += SubByte - MinSubByte;
if (Byte % BytesPerElement != 0)

Op = Op.getOperand(0);

if (Op.getValueType() != VecVT) {

DCI.AddToWorklist(Op.getNode());

SDValue SystemZTargetLowering::combineTruncateExtract(

if (canTreatAsByteVector(VecVT)) {

if (BytesPerElement % TruncBytes == 0) {

unsigned Scale = BytesPerElement / TruncBytes;
unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;

EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (TrueOp && FalseOp) {
    DCI.CombineTo(N0.getNode(), TruncSelect);
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
  unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  if (Op1 == N->getOperand(0))
  if (ElemBytes <= 4) {
    Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
    DCI.AddToWorklist(Op1.getNode());
    DCI.AddToWorklist(Op.getNode());
    return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
  LoPart = HiPart = nullptr;
  if (Use.getResNo() != 0)
  bool IsLoPart = true;
  LoPart = HiPart = nullptr;
  if (Use.getResNo() != 0)
      User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
  switch (User->getConstantOperandVal(1)) {
  case SystemZ::subreg_l64:
  case SystemZ::subreg_h64:
SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT LdVT = N->getValueType(0);
  MVT LoadNodeVT = LN->getBasePtr().getSimpleValueType();
  if (PtrVT != LoadNodeVT) {
    return DAG.getExtLoad(LN->getExtensionType(), DL, LN->getValueType(0),
                          LN->getChain(), AddrSpaceCast, LN->getMemoryVT(),
                          LN->getMemOperand());
  SDNode *LoPart, *HiPart;
                   LD->getPointerInfo(), LD->getBaseAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(HiPart, EltLoad, true);
                   LD->getPointerInfo().getWithOffset(8), LD->getBaseAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(LoPart, EltLoad, true);
    DCI.AddToWorklist(Chain.getNode());
  for (SDUse &Use : N->uses()) {
    } else if (Use.getResNo() == 0)
  if (!Replicate || OtherUses.empty())
  for (SDNode *U : OtherUses) {
    Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op);
8058bool SystemZTargetLowering::canLoadStoreByteSwapped(
EVT VT)
const {
8059 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
8061 if (Subtarget.hasVectorEnhancements2())
8062 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)
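// The scalar cases map onto the byte-reversing loads/stores LRVH/LRV/LRVG
// and STRVH/STRV/STRVG; the vector (and i128) cases need the VLBR/VSTBR
// family, which is only available with vector enhancements 2 -- hence the
// extra subtarget check above.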
  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue;
    if ((unsigned) M[i] != NumElts - 1 - i)
  for (auto *U : StoredVal->users()) {
    EVT CurrMemVT = ST->getMemoryVT().getScalarType();
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT MemVT = SN->getMemoryVT();
  MVT StoreNodeVT = SN->getBasePtr().getSimpleValueType();
  if (PtrVT != StoreNodeVT) {
    return DAG.getStore(SN->getChain(), DL, SN->getValue(), AddrSpaceCast,
                        SN->getPointerInfo(), SN->getBaseAlign(),
                        SN->getMemOperand()->getFlags(), SN->getAAInfo());
  if (MemVT.isInteger() && SN->isTruncatingStore()) {
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());
                          SN->getBasePtr(), SN->getMemoryVT(),
                          SN->getMemOperand());
  if (!SN->isTruncatingStore() &&
                          Ops, MemVT, SN->getMemOperand());
  if (!SN->isTruncatingStore() &&
      Subtarget.hasVectorEnhancements2()) {
    ArrayRef<int> ShuffleMask = SVN->getMask();
                          Ops, MemVT, SN->getMemOperand());
  if (!SN->isTruncatingStore() &&
      Op1.getOpcode() == ISD::READCYCLECOUNTER &&
      N->getOperand(0).reachesChainWithoutSideEffects(
          SDValue(Op1.getNode(), 1))) {
                          Ops, MemVT, SN->getMemOperand());
        SN->getChain(), DL, HiPart, SN->getBasePtr(), SN->getPointerInfo(),
        SN->getBaseAlign(), SN->getMemOperand()->getFlags(), SN->getAAInfo());
        SN->getChain(), DL, LoPart,
        SN->getPointerInfo().getWithOffset(8), SN->getBaseAlign(),
        SN->getMemOperand()->getFlags(), SN->getAAInfo());
  auto FindReplicatedImm = [&](ConstantSDNode *C, unsigned TotBytes) {
    if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() ||
    APInt Val = C->getAPIntValue();
      assert(SN->isTruncatingStore() &&
             "Non-truncating store and immediate value does not fit?");
      Val = Val.trunc(TotBytes * 8);
    SystemZVectorConstantInfo VCI(APInt(TotBytes * 8, Val.getZExtValue()));
    if (VCI.isVectorConstantLegal(Subtarget) &&
  auto FindReplicatedReg = [&](SDValue MulOp) {
    EVT MulVT = MulOp.getValueType();
    if (MulOp->getOpcode() == ISD::MUL &&
        (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
        WordVT = LHS->getOperand(0).getValueType();
        SystemZVectorConstantInfo VCI(
        if (VCI.isVectorConstantLegal(Subtarget) &&
            WordVT == VCI.VecVT.getScalarType())
      FindReplicatedImm(C, ...); FindReplicatedReg(SplatVal);
      FindReplicatedReg(Op1);
           "Bad type handling");
    return DAG.getStore(SN->getChain(), SDLoc(SN), SplatVal,
                        SN->getBasePtr(), SN->getMemOperand());
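// The idea of the two helpers above, sketched: if the stored scalar is a
// byte/halfword/word replication (either an immediate such as
// 0x0101010101010101, or a register multiplied by such a pattern), the
// store can be fed from a vector splat (SystemZISD::REPLICATE) so that
// instruction selection can use a VREPI-style splat plus an element store
// instead of materializing the full-width scalar.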
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
    ArrayRef<int> ShuffleMask = SVN->getMask();
                              Ops, LD->getMemoryVT(), LD->getMemOperand());
    DCI.CombineTo(N, ESLoad);
    DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  if (!Subtarget.hasVector())
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);
    EVT VecVT = Op.getValueType();
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op);
    if (canTreatAsByteVector(VecVT))
      return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                            IndexN->getZExtValue(), DCI, false);
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  if (N->getOperand(0) == N->getOperand(1))
  if (Chain1 == Chain2)
SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() &&
    for (auto *U : Vec->users()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 1) {
        if (OtherRound.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                                 {MVT::v4f32, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VRound.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                       VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
                             N->getVTList(), Extract0, Chain);
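// Net effect, assuming both extracts are present: two scalar f64->f32
// roundings of elements 0 and 1 of the same v2f64 are merged into a single
// vector rounding of the whole vector (with the strict chain threaded
// through in the strict-FP case), followed by two cheap element extracts.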
SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  SelectionDAG &DAG = DCI.DAG;
  if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() &&
    for (auto *U : Vec->users()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 2) {
        if (OtherExtend.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                                 {MVT::v2f64, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VExtend.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                       VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
                             N->getVTList(), Extract0, Chain);
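// Symmetrically to combineFP_ROUND: two scalar f32->f64 extensions of
// elements 0 and 2 of one v4f32 (the even lanes, which is what the
// getConstantOperandVal(1) == 2 check above pairs up) become a single
// vector extension to v2f64 plus two element extracts.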
SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();
  if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits &&
      OutScalarBits <= 64) {
    unsigned ExtOpcode =
    return DAG.getNode(Opcode, SDLoc(N), OutVT, ExtOp);
SDValue SystemZTargetLowering::combineFCOPYSIGN(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
                              Ops, LD->getMemoryVT(), LD->getMemOperand());
    if (N->getValueType(0) == MVT::i16)
    DCI.CombineTo(N, ResVal);
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);
      (canLoadStoreByteSwapped(N->getValueType(0)) &&
    EVT VecVT = N->getValueType(0);
      Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
      DCI.AddToWorklist(Vec.getNode());
      Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
      DCI.AddToWorklist(Elt.getNode());
      DCI.AddToWorklist(Vec.getNode());
      DCI.AddToWorklist(Elt.getNode());
  if (SV && Op.hasOneUse()) {
    EVT VecVT = N->getValueType(0);
      Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0);
      DCI.AddToWorklist(Op0.getNode());
      Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1);
      DCI.AddToWorklist(Op1.getNode());
      DCI.AddToWorklist(Op0.getNode());
      DCI.AddToWorklist(Op1.getNode());
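// Summary of the cases above: a BSWAP whose only operand is a load becomes
// a byte-swapped load (with chain users rewired via CombineTo), and a BSWAP
// of a vector insertion or shuffle is pushed down onto the scalar/vector
// operands so that it can later fuse with loads or stores of those values.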
SDValue SystemZTargetLowering::combineSETCC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
      Src.getValueType().isFixedLengthVector() &&
      Src.getValueType().getScalarType() == MVT::i1) {
    EVT CmpVT = Src.getOperand(0).getValueType();
      return std::make_pair(OpCC, OpCCValid);
    int CCValidVal = CCValid->getZExtValue();
    return std::make_pair(Op4CCReg, CCValidVal);
  return std::make_pair(Op0CC, Op0CCValid);
    return {Val, Val, Val, Val};
  for (auto CC : {0, 1, 2, 3})
  return ShiftedCCVals;
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  if (TrueSDVals.empty() || FalseSDVals.empty())
  for (auto &CCVal : {0, 1, 2, 3})
    MergedSDVals.emplace_back(((CCMaskVal & (1 << (3 - CCVal))) != 0)
                                  : FalseSDVals[CCVal]);
  return MergedSDVals;
  if (Op0SDVals.empty() || Op1SDVals.empty())
  for (auto CCVal : {0, 1, 2, 3})
        Opcode, DL, Val.getValueType(), Op0SDVals[CCVal], Op1SDVals[CCVal]));
  return BinaryOpSDVals;
  auto *CCNode = CCReg.getNode();
  auto emulateTMCCMask = [](const SDValue &Op0Val, const SDValue &Op1Val) {
    if (!Op0Node || !Op1Node)
    auto Op0APVal = Op0Node->getAPIntValue();
    auto Op1APVal = Op1Node->getAPIntValue();
    auto Result = Op0APVal & Op1APVal;
    bool AllOnes = Result == Op1APVal;
    bool AllZeros = Result == 0;
    bool IsLeftMostBitSet = Result[Op1APVal.getActiveBits()] != 0;
    return AllZeros ? 0 : AllOnes ? 3 : IsLeftMostBitSet ? 2 : 1;
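// The lambda mirrors TEST UNDER MASK's condition codes: CC 0 when all
// selected bits are zero, CC 3 when all selected bits are one, and CC 1/2
// for mixed results depending on whether the leftmost selected bit is zero
// or one (z/Architecture TM/TMLL family).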
  auto [Op0CC, Op0CCValid] = findCCUse(Op0);
  if (Op0SDVals.empty() || Op1SDVals.empty())
  for (auto CC : {0, 1, 2, 3}) {
    auto CCVal = emulateTMCCMask(Op0SDVals[CC], Op1SDVals[CC]);
    NewCCMask |= (CCMask & (1 << (3 - CCVal))) != 0;
  NewCCMask &= Op0CCValid;
  CCValid = Op0CCValid;
  auto [Op0CC, Op0CCValid] = findCCUse(CmpOp0);
  if (Op0SDVals.empty() || Op1SDVals.empty())
  auto CmpTypeVal = CmpType->getZExtValue();
  const auto compareCCSigned = [&CmpTypeVal](const SDValue &Op0Val,
    if (!Op0Node || !Op1Node)
    auto Op0APVal = Op0Node->getAPIntValue();
    auto Op1APVal = Op1Node->getAPIntValue();
      return Op0APVal == Op1APVal ? 0 : Op0APVal.slt(Op1APVal) ? 1 : 2;
    return Op0APVal == Op1APVal ? 0 : Op0APVal.ult(Op1APVal) ? 1 : 2;
  for (auto CC : {0, 1, 2, 3}) {
    auto CCVal = compareCCSigned(Op0SDVals[CC], Op1SDVals[CC]);
    NewCCMask |= (CCMask & (1 << (3 - CCVal))) != 0;
  NewCCMask &= Op0CCValid;
  CCValid = Op0CCValid;
                                             const Value *Rhs) const {
  const auto isFlagOutOpCC = [](const Value *V) {
    const Value *RHSVal;
      if (CB->isInlineAsm()) {
          IA->getConstraintString().find("{@cc}") != std::string::npos;
  if (isFlagOutOpCC(Lhs) && isFlagOutOpCC(Rhs))
  return {-1, -1, -1};
                                            DAGCombinerInfo &DCI) const {
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
                     N->getOperand(3), CCReg);
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  bool IsCombinedCCReg = combineCCMask(CCReg, CCValidVal, CCMaskVal, DAG);
  const auto constructCCSDValsFromSELECT = [&CCReg](SDValue &Val) {
    if (Val.getOperand(4) != CCReg)
    int CCMaskVal = CCMask->getZExtValue();
    for (auto &CC : {0, 1, 2, 3})
      Res.emplace_back(((CCMaskVal & (1 << (3 - CC))) != 0) ? TrueVal
  if (TrueSDVals.empty())
    TrueSDVals = constructCCSDValsFromSELECT(TrueVal);
  if (FalseSDVals.empty())
    FalseSDVals = constructCCSDValsFromSELECT(FalseVal);
  if (!TrueSDVals.empty() && !FalseSDVals.empty()) {
    SmallSet<SDValue, 4> MergedSDValsSet;
    for (auto CC : {0, 1, 2, 3}) {
      if ((CCValidVal & ((1 << (3 - CC)))) != 0)
        MergedSDValsSet.insert(((CCMaskVal & (1 << (3 - CC))) != 0)
    if (MergedSDValsSet.size() == 1)
      return *MergedSDValsSet.begin();
    if (MergedSDValsSet.size() == 2) {
      auto BeginIt = MergedSDValsSet.begin();
      SDValue NewTrueVal = *BeginIt, NewFalseVal = *next(BeginIt);
      if (NewTrueVal == FalseVal || NewFalseVal == TrueVal)
      for (auto CC : {0, 1, 2, 3}) {
        NewCCMask |= ((CCMaskVal & (1 << (3 - CC))) != 0)
                         ? (TrueSDVals[CC] == NewTrueVal)
                         : (FalseSDVals[CC] == NewTrueVal);
      CCMaskVal = NewCCMask;
      CCMaskVal &= CCValidVal;
      IsCombinedCCReg = true;
  if (IsCombinedCCReg)
SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  if (!SelectCCValid || !SelectCCMask)
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();
  if (!TrueVal || !FalseVal)
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
    SelectCCMaskVal ^= SelectCCValidVal;
  if (SelectCCValidVal & ~CCValidVal)
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
  return Select->getOperand(4);
SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
SDValue SystemZTargetLowering::combineShiftToMulAddHigh(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
         "SRL or SRA node is required here!");
  if (!Subtarget.hasVector())
  SDValue ShiftOperand = N->getOperand(0);
  if (!IsSignExt && !IsZeroExt)
    unsigned ActiveBits = IsSignExt
                              ? Constant->getAPIntValue().getSignificantBits()
                              : Constant->getAPIntValue().getActiveBits();
    if (ActiveBits > NarrowVTSize)
    unsigned ActiveBits = IsSignExt
                              ? Constant->getAPIntValue().getSignificantBits()
                              : Constant->getAPIntValue().getActiveBits();
    if (ActiveBits > NarrowVTSize)
         "Cannot have a multiply node with two different operand types.");
         "Cannot have an add node with two different operand types.");
  if (ShiftAmt != NarrowVTSize)
  if (!(NarrowVT == MVT::v16i8 || NarrowVT == MVT::v8i16 ||
        NarrowVT == MVT::v4i32 ||
        (Subtarget.hasVectorEnhancements3() &&
         (NarrowVT == MVT::v2i64 || NarrowVT == MVT::i128))))
                           MulhRightOp, MulhAddOp);
  bool IsSigned = N->getOpcode() == ISD::SRA;
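// Shape of the match, as a sketch: (srl/sra (mul (*ext x), (*ext y)), n),
// where n is exactly the width of the narrow element type, folds to a
// multiply-high (SRA selecting the signed form), optionally with an addend
// recognized for the multiply-and-add-high variants; the type list above
// restricts this to types with native high-multiply support.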
  EVT VT = Op.getValueType();
    Op = Op.getOperand(0);
    if (Op.getValueType().getVectorNumElements() == 2 * NumElts &&
      bool CanUseEven = true, CanUseOdd = true;
      for (unsigned Elt = 0; Elt < NumElts; Elt++) {
        if (ShuffleMask[Elt] == -1)
        if (unsigned(ShuffleMask[Elt]) != 2 * Elt)
        if (unsigned(ShuffleMask[Elt]) != 2 * Elt + 1)
      Op = Op.getOperand(0);
  if (VT == MVT::i128 && Subtarget.hasVectorEnhancements3() &&
    Op = Op.getOperand(0);
        Op.getOperand(0).getValueType() == MVT::v2i64 &&
      unsigned Elem = Op.getConstantOperandVal(1);
      Op = Op.getOperand(0);
SDValue SystemZTargetLowering::combineMUL(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  if (OpcodeCand0 && OpcodeCand0 == OpcodeCand1)
    return DAG.getNode(OpcodeCand0, SDLoc(N), N->getValueType(0), Op0, Op1);
SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned Id = N->getConstantOperandVal(1);
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (C->getZExtValue() >= 15)
      return DAG.getLoad(N->getValueType(0), SDLoc(N), N->getOperand(0),
                         N->getOperand(3), MachinePointerInfo());
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (C->getZExtValue() >= 15)
      return DAG.getStore(N->getOperand(0), SDLoc(N), N->getOperand(2),
                          N->getOperand(4), MachinePointerInfo());
  switch (N->getOpcode()) {
  case ISD::LOAD:      return combineLOAD(N, DCI);
  case ISD::STORE:     return combineSTORE(N, DCI);
  case ISD::FP_EXTEND: return combineFP_EXTEND(N, DCI);
  case ISD::SRA:       return combineShiftToMulAddHigh(N, DCI);
  case ISD::MUL:       return combineMUL(N, DCI);
  case ISD::UREM:      return combineIntDIVREM(N, DCI);
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    SrcDemE = DemandedElts;
    SrcDemE = SrcDemE.trunc(NumElts / 2);
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vpdi: {
    SrcDemE = APInt(NumElts, 0);
    if (!DemandedElts[OpNo - 1])
    unsigned Mask = Op.getConstantOperandVal(3);
    unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
    SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
  case Intrinsic::s390_vsldb: {
    assert(VT == MVT::v16i8 && "Unexpected type.");
    unsigned FirstIdx = Op.getConstantOperandVal(3);
    assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
    unsigned NumSrc0Els = 16 - FirstIdx;
    SrcDemE = APInt(NumElts, 0);
      APInt DemEls = DemandedElts.trunc(NumSrc0Els);
      APInt DemEls = DemandedElts.lshr(NumSrc0Els);
  case Intrinsic::s390_vperm:
    SrcDemE = APInt(1, 1);
    SrcDemE = DemandedElts;
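// VPDI example (illustrative): the immediate's bit 2 (value 4) selects the
// doubleword taken from the first operand and bit 0 (value 1) the one taken
// from the second, which is why MaskBit is 4 for OpNo 1 and 1 for OpNo 2,
// and the demanded source element index is (Mask & MaskBit) ? 1 : 0.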
                                          const APInt &DemandedElts,
                                          const APInt &DemandedElts,
                                          unsigned Depth) const {
  unsigned Tmp0, Tmp1;
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
         "KnownBits does not match VT in bitwidth");
         "DemandedElts does not match VT number of elements");
  unsigned Opcode = Op.getOpcode();
    bool IsLogical = false;
    unsigned Id = Op.getConstantOperandVal(0);
    case Intrinsic::s390_vpksh:
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs:
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
    case Intrinsic::s390_vuplhb:
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb:
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
    case Intrinsic::s390_vuphb:
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
  if (LHS == 1) return 1;
  if (RHS == 1) return 1;
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  if (SrcBitWidth > VTBits) {
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
                                          unsigned Depth) const {
  if (Op.getResNo() != 0)
  unsigned Opcode = Op.getOpcode();
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
    EVT VT = Op.getValueType();
  switch (Op->getOpcode()) {
         "Unexpected stack alignment");
  unsigned StackProbeSize =
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
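// Example of the rounding arithmetic: a probe-size value of 16384 with
// 8-byte stack alignment is already a multiple (16384 & ~7 == 16384), while
// an odd request such as 100 rounds down to 96; a request smaller than the
// alignment degenerates to StackAlign itself via the final ternary.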
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    if (MI.readsRegister(SystemZ::CC, nullptr))
    if (MI.definesRegister(SystemZ::CC, nullptr))
  if (miI == MBB->end()) {
    if (Succ->isLiveIn(SystemZ::CC))
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
  for (auto *MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
    if (auto It = RegRewriteTable.find(TrueReg); It != RegRewriteTable.end())
      TrueReg = It->second.first;
    if (auto It = RegRewriteTable.find(FalseReg); It != RegRewriteTable.end())
      FalseReg = It->second.second;
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
  assert(TFL->hasReservedCallFrame(MF) &&
         "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
  uint32_t NumBytes = MI.getOperand(0).getImm();
  MI.eraseFromParent();

  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
  SmallVector<MachineInstr*, 8> Selects;
  SmallVector<MachineInstr*, 8> DbgValues;
    assert(NextMI.getOperand(3).getImm() == CCValid &&
           "Bad CCValid operands since CC was not redefined.");
    if (NextMI.getOperand(4).getImm() == CCMask ||
        NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
    if (NextMI.definesRegister(SystemZ::CC, nullptr) ||
        NextMI.usesCustomInsertionHook())
    for (auto *SelMI : Selects)
      if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
    if (NextMI.isDebugInstr()) {
      assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
    } else if (User || ++Count > 20)
  MachineInstr *LastMI = Selects.back();
  bool CCKilled = (LastMI->killsRegister(SystemZ::CC, nullptr) ||
  MachineBasicBlock *StartMBB = MBB;
  for (auto *SelMI : Selects)
    SelMI->eraseFromParent();
  for (auto *DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);
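// emitSelect thus builds the usual diamond: StartMBB conditionally branches
// around a false block into a join block, and every select pseudo batched
// in Selects becomes a PHI there (createPHIsForSelects), so one CC test
// covers a whole run of selects that share the same CCValid/CCMask -- or
// the inverted mask, by swapping the PHI operands.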
                                                   unsigned StoreOpcode,
                                                   unsigned STOCOpcode,
                                                   bool Invert) const {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
  MachineMemOperand *MMO = nullptr;
  for (auto *I : MI.memoperands())
    if (I->isStore()) {
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    MI.eraseFromParent();
  MachineBasicBlock *StartMBB = MBB;
  if (!MI.killsRegister(SystemZ::CC, nullptr) &&
  MI.eraseFromParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineBasicBlock *StartMBB = MBB;
  int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
  Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
  MI.eraseFromParent();
                                                          bool Invert) const {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  MachineBasicBlock *StartMBB = MBB;
    Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  } else if (BinOpcode)
  MI.eraseFromParent();
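// Sketch of the expansion: the loop block PHIs OldVal from OrigVal (the
// initial L load) and the CS result; the containing word is rotated so the
// subword field sits at a known position (RotatedOldVal), the binary op
// produces RotatedNewVal, the word is rotated back, and CS retries the loop
// until the compare-and-swap succeeds.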
                                                          unsigned KeepOldMask) const {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  MachineBasicBlock *StartMBB = MBB;
  MI.eraseFromParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  int64_t Disp = MI.getOperand(2).getImm();
  Register CmpVal = MI.getOperand(3).getReg();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register SwapVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);
  MachineBasicBlock *StartMBB = MBB;
  if (!MI.registerDefIsDead(SystemZ::CC, nullptr))
  MI.eraseFromParent();
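// Same rotation trick as the RMW loops above: OldValRot exposes the
// BitSize-wide field for comparison against CmpVal (zero-extended via
// LLCR/LLHR for the 8/16-bit widths), and on a mismatch of only the
// retried field the loop re-enters with RetryOldVal/RetrySwapVal rebuilt
// from the freshly loaded word.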
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
      .add(MI.getOperand(1))
      .addImm(SystemZ::subreg_h64)
      .add(MI.getOperand(2))
      .addImm(SystemZ::subreg_l64);
  MI.eraseFromParent();

                                                  bool ClearEven) const {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
  MI.eraseFromParent();
                                                         unsigned Opcode,
                                                         bool IsMemset) const {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  uint64_t DestDisp = MI.getOperand(1).getImm();
  auto foldDisplIfNeeded = [&](MachineOperand &Base, uint64_t &Disp) -> void {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
    SrcDisp = MI.getOperand(3).getImm();
    SrcBase = DestBase;
    SrcDisp = DestDisp++;
    foldDisplIfNeeded(DestBase, DestDisp);
  MachineOperand &LengthMO = MI.getOperand(IsMemset ? 2 : 4);
  bool IsImmForm = LengthMO.isImm();
  bool IsRegForm = !IsImmForm;
  auto insertMemMemOp = [&](MachineBasicBlock *InsMBB,
                            MachineOperand DBase, uint64_t DDisp,
                            MachineOperand SBase, uint64_t SDisp,
                            unsigned Length) -> void {
    if (ByteMO.isImm())
  bool NeedsLoop = false;
  uint64_t ImmLength = 0;
  Register LenAdjReg = SystemZ::NoRegister;
    ImmLength = LengthMO.getImm();
    ImmLength += IsMemset ? 2 : 1;
    if (ImmLength == 0) {
      MI.eraseFromParent();
    if (Opcode == SystemZ::CLC) {
      if (ImmLength > 3 * 256)
    } else if (ImmLength > 6 * 256)
    LenAdjReg = LengthMO.getReg();
  MachineBasicBlock *EndMBB =
      (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
      MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
    TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
  auto loadZeroAddress = [&]() -> MachineOperand {
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
    DestBase = loadZeroAddress();
  if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
    SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
  MachineBasicBlock *StartMBB = nullptr;
  MachineBasicBlock *LoopMBB = nullptr;
  MachineBasicBlock *NextMBB = nullptr;
  MachineBasicBlock *DoneMBB = nullptr;
  MachineBasicBlock *AllDoneMBB = nullptr;
      (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));
  const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
  Register ThisSrcReg = MRI.createVirtualRegister(RC);
      (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
  Register NextSrcReg = MRI.createVirtualRegister(RC);
      (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
  RC = &SystemZ::GR64BitRegClass;
  Register ThisCountReg = MRI.createVirtualRegister(RC);
  Register NextCountReg = MRI.createVirtualRegister(RC);
    MBB = MemsetOneCheckMBB;
    MBB = MemsetOneMBB;
    if (EndMBB && !ImmLength)
    if (!HaveSingleBase)
    if (Opcode == SystemZ::MVC)
    if (!HaveSingleBase)
    Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    Register RemDestReg = HaveSingleBase ? RemSrcReg
        : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    if (!HaveSingleBase)
    MachineInstrBuilder EXRL_MIB =
    if (Opcode != SystemZ::MVC) {
  while (ImmLength > 0) {
    uint64_t ThisLength = std::min(ImmLength, uint64_t(256));
    foldDisplIfNeeded(DestBase, DestDisp);
    foldDisplIfNeeded(SrcBase, SrcDisp);
    insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
  if (EndMBB && ImmLength > 0) {
  MI.eraseFromParent();
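// MVC, CLC and friends encode an 8-bit length, i.e. at most 256 bytes per
// instruction.  Small constant lengths are unrolled inline (the 3*256 and
// 6*256 thresholds above bound the unrolling; the CLC limit is lower,
// presumably because unrolled compares also need a CC branch after each
// chunk); anything larger, or a register-valued length, becomes a loop over
// 256-byte chunks with an EXRL-executed instruction covering the remainder.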
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();
  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg = MRI.createVirtualRegister(RC);
  MachineBasicBlock *StartMBB = MBB;
  MI.eraseFromParent();
                                                         bool NoFloat) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MI.setDesc(TII->get(Opcode));
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  Register SrcReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
  Register DstReg = MRI->createVirtualRegister(RC);
  MI.eraseFromParent();

  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  Register DstReg = MI.getOperand(0).getReg();
  Register SizeReg = MI.getOperand(2).getReg();
  MachineBasicBlock *StartMBB = MBB;
  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  MI.eraseFromParent();

SDValue SystemZTargetLowering::
  auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>();
  switch (MI.getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    return emitAdjCallStack(MI, MBB);

  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::SCmp128Hi:
    return emitICmp128Hi(MI, MBB, false);
  case SystemZ::UCmp128Hi:
    return emitICmp128Hi(MI, MBB, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0);
  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);
  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);
  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);
  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);

  case SystemZ::ATOMIC_LOADW_MIN:
  case SystemZ::ATOMIC_LOADW_MAX:
  case SystemZ::ATOMIC_LOADW_UMIN:
  case SystemZ::ATOMIC_LOADW_UMAX:

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCImm:
  case SystemZ::MVCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCImm:
  case SystemZ::XCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCImm:
  case SystemZ::CLCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::MemsetImmImm:
  case SystemZ::MemsetImmReg:
  case SystemZ::MemsetRegImm:
  case SystemZ::MemsetRegReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);
  case SystemZ::EH_SjLj_SetJmp:
  case SystemZ::EH_SjLj_LongJmp:
  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:

SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
      DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  EVT OpVT = Op.getValueType();
  assert(OpVT.isVector() && "Operand type for VECREDUCE_ADD is not a vector.");
  const AttributeList &Attrs = F->getAttributes();
  if (Attrs.hasRetAttrs())
    OS << Attrs.getAsString(AttributeList::ReturnIndex) << " ";
  OS << *F->getReturnType() << " @" << F->getName() << "(";
  for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) {
    OS << *FT->getParamType(I);
    for (auto A : {Attribute::SExt, Attribute::ZExt, Attribute::NoExt})
bool SystemZTargetLowering::isInternal(const Function *Fn) const {
  std::map<const Function *, bool>::iterator Itr = IsInternalCache.find(Fn);
  if (Itr == IsInternalCache.end())
    Itr = IsInternalCache
              .insert(std::pair<const Function *, bool>(
  return Itr->second;

void SystemZTargetLowering::
  bool IsInternal = false;
  const Function *CalleeFn = nullptr;
    IsInternal = isInternal(CalleeFn);
  if (!IsInternal && !verifyNarrowIntegerArgs(Outs)) {
    errs() << "ERROR: Missing extension attribute of passed "
           << "value in call to function:\n" << "Callee: ";
    if (CalleeFn != nullptr)
    errs() << "Caller: ";

void SystemZTargetLowering::
  if (!isInternal(F) && !verifyNarrowIntegerArgs(Outs)) {
    errs() << "ERROR: Missing extension attribute of returned "
           << "value from function:\n";

bool SystemZTargetLowering::verifyNarrowIntegerArgs(
  if (!Subtarget.isTargetELF())
  for (unsigned i = 0; i < Outs.size(); ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
           "Unexpected integer argument VT.");
    if (VT == MVT::i32 &&
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis false
Function Alias Analysis Results
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
static bool isSelectPseudo(MachineInstr &MI)
static bool isUndef(const MachineInstr &MI)
Register const TargetRegisterInfo * TRI
Promote Memory to Register
uint64_t IntrinsicInst * II
static constexpr MCPhysReg SPReg
const SmallVectorImpl< MachineOperand > & Cond
static cl::opt< RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode > Mode("regalloc-enable-advisor", cl::Hidden, cl::init(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default), cl::desc("Enable regalloc advisor mode"), cl::values(clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Default, "default", "Default"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Release, "release", "precompiled"), clEnumValN(RegAllocEvictionAdvisorAnalysisLegacy::AdvisorMode::Development, "development", "for training")))
This file defines the SmallSet class.
static SDValue getI128Select(SelectionDAG &DAG, const SDLoc &DL, Comparison C, SDValue TrueOp, SDValue FalseOp)
static SmallVector< SDValue, 4 > simplifyAssumingCCVal(SDValue &Val, SDValue &CC, SelectionDAG &DAG)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void printFunctionArgExts(const Function *F, raw_fd_ostream &OS)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static cl::opt< bool > EnableIntArgExtCheck("argext-abi-check", cl::init(false), cl::desc("Verify that narrow int args are properly extended per the " "SystemZ ABI."))
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue lowerAddrSpaceCast(SDValue Op, SelectionDAG &DAG)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static SDValue convertToF16(SDValue Op, SelectionDAG &DAG)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask, SelectionDAG &DAG)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static std::pair< SDValue, int > findCCUse(const SDValue &Val)
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static SDValue convertFromF16(SDValue Op, SDLoc DL, SelectionDAG &DAG)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static unsigned detectEvenOddMultiplyOperand(const SelectionDAG &DAG, const SystemZSubtarget &Subtarget, SDValue &Op)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
LLVM_ABI APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
LLVM_ABI APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
LLVM_ABI void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
This class holds the attributes for a particular argument, parameter, function, or return value.
LLVM_ABI bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
LLVM_ABI StringRef getValueAsString() const
Return the attribute's value as a string.
static LLVM_ABI StringRef getNameFromAttrKind(Attribute::AttrKind AttrKind)
LLVM Basic Block Representation.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
LLVM_ABI bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
LLVM_ABI bool isConstant() const
CCState - This class holds information needed while lowering arguments and return values.
LLVM_ABI void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
LLVM_ABI bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
LLVM_ABI void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values into this state.
LLVM_ABI void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed values into this state.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
LLVM_ABI void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals into this state.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
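CCState and CCValAssign are always used together in the lowering hooks; a hedged sketch of the standard flow (CallConv, IsVarArg, MF, DAG and Ins are assumed to be in scope, and CC_SystemZ is the TableGen-generated assignment function):

// Classify the incoming arguments, then walk the resulting assignments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
for (CCValAssign &VA : ArgLocs) {
  if (VA.isRegLoc()) {
    Register R = VA.getLocReg();        // argument arrives in a register
    // ... copy from R, honoring VA.getLocInfo() (SExt/ZExt/AExt) ...
  } else {
    int64_t Off = VA.getLocMemOffset(); // argument arrives on the stack
    // ... create a fixed stack object at Off and load from it ...
  }
}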
This class represents a function call, abstracting a target machine's calling convention.
uint64_t getZExtValue() const
This is an important base class in LLVM.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
bool hasAddressTaken(const User **=nullptr, bool IgnoreCallbackUses=false, bool IgnoreAssumeLikeCalls=true, bool IgnoreLLVMUsed=false, bool IgnoreARCAttachedCall=false, bool IgnoreCastedDirectCall=false) const
hasAddressTaken - returns true if there are any uses of this function other than direct calls or invokes to it, or blockaddress expressions.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
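A hedged sketch of the attribute-query pattern these accessors support; the attribute strings and the 4096 default are illustrative, not authoritative:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Hypothetical helper in the style of a stack-probe-size query.
static uint64_t probeSize(const MachineFunction &MF) {
  const Function &F = MF.getFunction();
  if (F.hasFnAttribute("no-stack-arg-probe"))   // string-attribute form
    return 0;
  return F.getFnAttributeAsParsedInteger("stack-probe-size", 4096);
}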
LLVM_ABI const GlobalObject * getAliaseeObject() const
bool hasLocalLinkage() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
LLVM_ABI void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and updates PHI operands in the successor blocks which refer to FromMBB to refer to this block.
LLVM_ABI void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
MachineInstrBundleIterator< MachineInstr > iterator
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminator, exception-handling target, or jump table.
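These block-manipulation calls combine into the standard custom-inserter split; a hedged sketch (assuming MI is the instruction being expanded inside MBB, and using the range form of splice):

#include "llvm/CodeGen/MachineFunction.h"
using namespace llvm;

// Split MBB after MI, producing a fall-through successor block.
static MachineBasicBlock *splitAfter(MachineInstr &MI, MachineBasicBlock *MBB) {
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *NewMBB =
      MF->CreateMachineBasicBlock(MBB->getBasicBlock());
  MF->insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
  // Move everything after MI into the new block, then rewire CFG and PHIs.
  NewMBB->splice(NewMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  NewMBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(NewMBB);
  return NewMBB;
}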
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
LLVM_ABI int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setReturnAddressIsTaken(bool s)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
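A hedged sketch of the BuildMI/MachineInstrBuilder chaining that these entries describe; the SystemZ opcodes are real but chosen purely for illustration, and TII, MRI, MBB, I (insertion point), DL and TargetMBB are assumed to be in scope:

// Materialize an immediate, compare it against zero, branch if equal.
Register Tmp = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
BuildMI(MBB, I, DL, TII->get(SystemZ::LGHI), Tmp)
    .addImm(42);                        // 16-bit signed immediate
BuildMI(MBB, I, DL, TII->get(SystemZ::CGHI))
    .addReg(Tmp)                        // register use
    .addImm(0);
BuildMI(MBB, I, DL, TII->get(SystemZ::BRC))
    .addImm(SystemZ::CCMASK_ICMP)       // CC values produced by the compare
    .addImm(SystemZ::CCMASK_CMP_EQ)     // branch when equal
    .addMBB(TargetMBB);                 // branch target block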
Representation of each machine instruction.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Align getBaseAlign() const
Returns alignment and volatility of the memory access.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUses uses of the indicated value.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
iterator_range< user_iterator > users()
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
LLVM_ABI SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT, unsigned Opcode)
Convert Op, which must be of integer type, to the integer type VT, by either any/sign/zero-extending or truncating it.
LLVM_ABI SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
LLVM_ABI SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
LLVM_ABI SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
LLVM_ABI SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
LLVM_ABI MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
LLVM_ABI SDValue getAtomicLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT MemVT, EVT VT, SDValue Chain, SDValue Ptr, MachineMemOperand *MMO)
LLVM_ABI SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
LLVM_ABI bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
LLVM_ABI SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
LLVM_ABI SDValue getRegister(Register Reg, EVT VT)
LLVM_ABI SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands, and they produce a value AND a token chain.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
LLVM_ABI SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=LocationSize::precise(0), const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
LLVM_ABI SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
LLVM_ABI SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
LLVM_ABI SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
LLVM_ABI bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
LLVM_ABI SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
LLVM_ABI SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
LLVM_ABI SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
LLVM_ABI bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
LLVM_ABI SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
LLVM_ABI SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or truncating it.
LLVM_ABI SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
LLVM_ABI std::pair< SDValue, SDValue > getStrictFPExtendOrRound(SDValue Op, SDValue Chain, const SDLoc &DL, EVT VT)
Convert Op, which must be a STRICT operation of float type, to the float type VT, by either extending or rounding (by truncation).
LLVM_ABI SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
LLVM_ABI SDValue getValueType(EVT)
LLVM_ABI SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
LLVM_ABI SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of float type, to the float type VT, by either extending or rounding (by truncation).
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
LLVM_ABI unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
LLVM_ABI void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
LLVM_ABI SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LLVM_ABI KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
LLVM_ABI SDValue getRegisterMask(const uint32_t *RegMask)
LLVM_ABI SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
LLVM_ABI bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
LLVM_ABI SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
LLVM_ABI SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
LLVM_ABI SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
LLVM_ABI SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
LLVM_ABI std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
LLVM_ABI SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
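The SelectionDAG getters above are composed inside lowering and combine code; a hedged sketch (hypothetical helper, Op assumed to be an i64 value) that builds an add and narrows it when a known-bits query proves that safe:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Hypothetical: compute Op + 1, truncating to i32 when provably lossless.
static SDValue addOneAndNarrow(SelectionDAG &DAG, const SDLoc &DL, SDValue Op) {
  SDValue One = DAG.getConstant(1, DL, MVT::i64);
  SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Op, One);
  // If the top 33 bits of Op are zero, Op + 1 < 2^32, so the truncation
  // to i32 cannot drop set bits.
  if (DAG.MaskedValueIsZero(Op, APInt::getHighBitsSet(64, 33)))
    return DAG.getZExtOrTrunc(Add, DL, MVT::i32);
  return Add;
}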
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction.
ArrayRef< int > getMask() const
const_iterator begin() const
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
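A hedged sketch combining the StringRef parsing calls with StringSwitch, in the shape of a getRegisterByName-style lookup (the helper and the register numbering are illustrative):

#include "llvm/ADT/StringSwitch.h"
using namespace llvm;

// Hypothetical name parser: accepts "r0".."r15" plus one alias.
static unsigned parseGRName(StringRef Name) {
  if (Name.starts_with("r")) {
    unsigned N;
    // getAsInteger returns true on failure.
    if (!Name.slice(1, Name.size()).getAsInteger(10, N) && N < 16)
      return N;
  }
  return StringSwitch<unsigned>(Name)
      .Case("sp", 15)     // stack pointer alias (illustrative)
      .Default(~0u);      // not a recognized register name
}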
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
SystemZCallingConventionRegisters * getSpecialRegisters() const
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
EVT getOptimalMemOpType(LLVMContext &Context, const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset, memcpy, and memmove lowering.
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail call position.
SDValue LowerAsmOutputForConstraint(SDValue &Chain, SDValue &Flag, const SDLoc &DL, const AsmOperandInfo &Constraint, SelectionDAG &DAG) const override
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG.
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
CondMergingParams getJumpConditionMergingParams(Instruction::BinaryOps Opc, const Value *Lhs, const Value *Rhs) const override
bool useSoftFloat() const override
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fit into the return registers.
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an EH pad.
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
SDValue useLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, MVT VT, SDValue Arg, SDLoc DL, SDValue Chain, bool IsStrict) const
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target has registered to invoke it for.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was registered to use 'custom' lowering for that result type.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false, does not have undef bits.
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool hasAndNot(SDValue Y) const override
Return true if the target has a bitwise and-not operation: X = ~A & B. This can be used to simplify select or other instructions.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in the Known.Zero and Known.One bitsets.
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers, particular to z/OS when in 64-bit mode.
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combine for.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
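All of these configuration hooks are meant to be invoked from a target's TargetLowering constructor. A hedged sketch in that style; the specific type/action pairs are illustrative, not the backend's actual tables:

// Inside a hypothetical SystemZTargetLowering constructor body:
addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
setOperationAction(ISD::CTPOP, MVT::i32, Custom);    // custom-lower popcount
setOperationAction(ISD::SDIVREM, MVT::i64, Expand);  // no single instruction
setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
setBooleanContents(ZeroOrOneBooleanContent);
computeRegisterProperties(Subtarget.getRegisterInfo());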
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
virtual bool findOptimalMemOpLowering(LLVMContext &Context, std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
TargetLowering(const TargetLowering &)=delete
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
bool hasOneUse() const
Return true if there is exactly one use of this value.
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
A raw_ostream that writes to a file descriptor.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ BSWAP
Byte Swap and Counting operators.
@ ADD
Simple integer binary arithmetic operators.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector type. The top elements 1 to N-1 are undefined.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ SSUBO
Same for subtraction.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns the current rounding mode: -1 undefined, 0 round to 0, 1 round to nearest (ties to even), 2 round to +inf, 3 round to -inf.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN, then return the top part.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low lanes of an integer vector.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
LLVM_ABI bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 or undef.
LLVM_ABI CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
LLVM_ABI CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
LLVM_ABI bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
LLVM_ABI bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
bool match(Val *V, const Pattern &P)
class_match< CmpInst > m_Cmp()
Matches any compare instruction and ignore it.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
LLVM_ABI Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
LLVM_ABI Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned VR16Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned FP16Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
const unsigned CCMASK_VCMP_NONE
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_NONE
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
initializer< Ty > init(const Ty &Val)
support::ulittle32_t Word
@ User
could "use" a pointer
NodeAddr< UseNode * > Use
NodeAddr< NodeBase * > Node
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
LLVM_ABI bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
LLVM_ABI SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
constexpr T maskLeadingOnes(unsigned N)
Create a bitmask with the N left-most bits set to 1, and all other bits set to 0.
constexpr bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
LLVM_ABI void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert ‘Bytes’ to a hex string and output to ‘OS’.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
LLVM_ABI bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
@ Success
The lock was released successfully.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily all virtual registers.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
LLVM_ABI ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
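Several of the helpers above are immediate-encoding predicates; a hedged sketch exercising them (the demo function and the bit widths are hypothetical):

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

// Hypothetical immediate-classification demo.
static void bitDemo(uint64_t V) {
  bool SImm16 = isInt<16>((int64_t)V);              // fits in signed 16 bits?
  bool UImm12 = isUInt<12>(V);                      // fits in unsigned 12 bits?
  int TZ = countr_zero(V);                          // trailing zeros (64 if V == 0)
  uint64_t Low12 = maskTrailingOnes<uint64_t>(12);  // 0xFFF
  int64_t S = SignExtend64<20>(V & 0xFFFFF);        // sign-extend low 20 bits
  bool Pow2 = isPowerOf2_32((uint32_t)V);           // power of two > 0?
  (void)SImm16; (void)UImm12; (void)TZ; (void)Low12; (void)S; (void)Pow2;
}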
LLVM_ABI bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
LLVM_ABI Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
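A hedged sketch of the EVT/MVT queries above applied to a fixed-width vector type (demo function hypothetical):

#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>
using namespace llvm;

static void evtDemo(LLVMContext &Ctx) {
  EVT VecVT = EVT::getVectorVT(Ctx, MVT::i32, 4);   // v4i32
  assert(VecVT.isVector() && VecVT.isInteger());
  EVT EltVT = VecVT.getVectorElementType();         // i32
  unsigned NumElts = VecVT.getVectorNumElements();  // 4
  uint64_t Bits = VecVT.getFixedSizeInBits();       // 128
  EVT IntVT = EVT::getIntegerVT(Ctx, Bits);         // i128
  (void)EltVT; (void)NumElts; (void)IntVT;
}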
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
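A hedged sketch of how these KnownBits queries compose inside a computeKnownBitsForTargetNode-style hook (the helper is hypothetical and assumes Op0 and Op1 have the same bit width):

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

// Hypothetical: a conservative bound that covers both operands.
static bool bothFitInByte(const SelectionDAG &DAG, SDValue Op0, SDValue Op1,
                          unsigned Depth) {
  KnownBits K0 = DAG.computeKnownBits(Op0, Depth + 1);
  KnownBits K1 = DAG.computeKnownBits(Op1, Depth + 1);
  // intersectWith keeps only the facts true of both values, so the
  // resulting maximum is a valid upper bound for either operand.
  KnownBits K = K0.intersectWith(K1);
  return K.getMaxValue().ult(256);
}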
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exempt from aliasing.
static LLVM_ABI MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
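A hedged sketch of the query these members support, roughly the shape an isFPImmLegal-style check can take (assumes an APFloat Imm and the Subtarget are in scope):

// Can this FP immediate be materialized by a vector-constant instruction?
SystemZVectorConstantInfo VCI(Imm.bitcastToAPInt());
if (VCI.isVectorConstantLegal(Subtarget)) {
  // VCI.Opcode and VCI.OpVals now describe the generating instruction.
}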
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
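The chained setters above are how libcall-style lowering assembles a call; a hedged sketch in the shape of a makeExternalCall-type helper (hypothetical, with the argument list left empty for brevity):

#include "llvm/CodeGen/TargetLowering.h"
using namespace llvm;

// Hypothetical zero-argument external call returning {value, chain}.
static std::pair<SDValue, SDValue>
emitCall(const TargetLowering &TLI, SelectionDAG &DAG, const SDLoc &DL,
         SDValue Chain, SDValue Callee, Type *RetTy, bool IsSigned) {
  TargetLowering::ArgListTy Args;       // no arguments in this sketch
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(Chain)
      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);
  return TLI.LowerCallTo(CLI);          // {return value, output chain}
}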
This structure is used to pass arguments to makeLibCall function.