29 #define DEBUG_TYPE "systemz-lower"
34 struct IPMConversion {
35 IPMConversion(
unsigned xorValue, int64_t addValue,
unsigned bit)
36 : XORValue(xorValue), AddValue(addValue),
Bit(bit) {}
46 : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
547 assert (isa<LoadInst>(I) || isa<StoreInst>(I));
551 bool IsVectorAccess = MemAccessTy->
isVectorTy();
555 if (!IsVectorAccess && isa<StoreInst>(I)) {
557 if (isa<ExtractElementInst>(DataOp))
558 IsVectorAccess =
true;
563 if (!IsVectorAccess && isa<LoadInst>(I) && I->
hasOneUse()) {
565 if (isa<InsertElementInst>(LoadUser))
566 IsVectorAccess =
true;
569 if (!isUInt<12>(Offset) && (IsFPAccess || IsVectorAccess))
580 return FromBits > ToBits;
588 return FromBits > ToBits;
597 if (Constraint.
size() == 1) {
598 switch (Constraint[0]) {
629 const char *constraint)
const {
638 switch (*constraint) {
657 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
663 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
664 if (isUInt<12>(
C->getZExtValue()))
669 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
675 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
676 if (isInt<20>(
C->getSExtValue()))
681 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
682 if (
C->getZExtValue() == 0x7fffffff)
692 static std::pair<unsigned, const TargetRegisterClass *>
694 const unsigned *Map) {
695 assert(*(Constraint.
end()-1) ==
'}' &&
"Missing '}'");
696 if (isdigit(Constraint[2])) {
699 Constraint.
slice(2, Constraint.
size() - 1).getAsInteger(10, Index);
700 if (!Failed && Index < 16 && Map[Index])
701 return std::make_pair(Map[Index], RC);
703 return std::make_pair(0U,
nullptr);
706 std::pair<unsigned, const TargetRegisterClass *>
709 if (Constraint.
size() == 1) {
711 switch (Constraint[0]) {
716 return std::make_pair(0U, &SystemZ::GR64BitRegClass);
718 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
719 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
723 return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
725 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
726 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
729 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
733 return std::make_pair(0U, &SystemZ::FP64BitRegClass);
735 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
736 return std::make_pair(0U, &SystemZ::FP32BitRegClass);
739 if (Constraint.
size() > 0 && Constraint[0] ==
'{') {
744 if (Constraint[1] ==
'r') {
754 if (Constraint[1] ==
'f') {
770 std::vector<SDValue> &Ops,
773 if (Constraint.length() == 1) {
774 switch (Constraint[0]) {
776 if (
auto *
C = dyn_cast<ConstantSDNode>(Op))
783 if (
auto *
C = dyn_cast<ConstantSDNode>(Op))
784 if (isUInt<12>(
C->getZExtValue()))
790 if (
auto *
C = dyn_cast<ConstantSDNode>(Op))
797 if (
auto *
C = dyn_cast<ConstantSDNode>(Op))
798 if (isInt<20>(
C->getSExtValue()))
804 if (
auto *
C = dyn_cast<ConstantSDNode>(Op))
805 if (
C->getZExtValue() == 0x7fffffff)
818 #include "SystemZGenCallingConv.inc"
821 Type *ToType)
const {
838 for (
unsigned i = 0;
i < Ins.
size(); ++
i)
843 for (
unsigned i = 0;
i < Outs.
size(); ++
i)
925 unsigned NumFixedGPRs = 0;
926 unsigned NumFixedFPRs = 0;
927 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
940 RC = &SystemZ::GR32BitRegClass;
944 RC = &SystemZ::GR64BitRegClass;
948 RC = &SystemZ::FP32BitRegClass;
952 RC = &SystemZ::FP64BitRegClass;
960 RC = &SystemZ::VR128BitRegClass;
981 ArgValue = DAG.
getLoad(LocVT, DL, Chain, FIN,
992 unsigned ArgIndex = Ins[
I].OrigArgIndex;
993 assert (Ins[
I].PartOffset == 0);
994 while (
I + 1 !=
E && Ins[
I + 1].OrigArgIndex == ArgIndex) {
996 unsigned PartOffset = Ins[
I + 1].PartOffset;
1014 int64_t StackSize = CCInfo.getNextStackOffset();
1019 int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
1032 &SystemZ::FP64BitRegClass);
1040 SystemZ::NumArgFPRs-NumFixedFPRs));
1053 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
1060 if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
1062 if (Outs[
I].
Flags.isSwiftSelf() || Outs[
I].Flags.isSwiftError())
1101 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
1113 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
1120 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1122 DAG.
getStore(Chain, DL, ArgValue, SpillSlot,
1126 unsigned ArgIndex = Outs[
I].OrigArgIndex;
1127 assert (Outs[
I].PartOffset == 0);
1128 while (
I + 1 !=
E && Outs[
I + 1].OrigArgIndex == ArgIndex) {
1129 SDValue PartValue = OutVals[
I + 1];
1130 unsigned PartOffset = Outs[
I + 1].PartOffset;
1134 DAG.
getStore(Chain, DL, PartValue, Address,
1138 ArgValue = SpillSlot;
1165 if (!MemOpChains.
empty())
1172 if (
auto *
G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1175 }
else if (
auto *
E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1178 }
else if (IsTailCall) {
1179 Chain = DAG.
getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
1185 for (
unsigned I = 0,
E = RegsToPass.
size();
I !=
E; ++
I) {
1187 RegsToPass[
I].second, Glue);
1198 for (
unsigned I = 0,
E = RegsToPass.
size();
I !=
E; ++
I)
1200 RegsToPass[I].second.getValueType()));
1205 assert(Mask &&
"Missing call preserved mask for calling convention");
1232 for (
unsigned I = 0,
E = RetLocs.
size(); I !=
E; ++
I) {
1260 for (
auto &Out : Outs)
1265 CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
1266 return RetCCInfo.
CheckReturn(Outs, RetCC_SystemZ);
1287 if (RetLocs.
empty())
1294 for (
unsigned I = 0,
E = RetLocs.
size();
I !=
E; ++
I) {
1306 Chain = DAG.
getCopyToReg(Chain, DL, Reg, RetValue, Glue);
1328 unsigned &CCValid) {
1329 unsigned Id = cast<ConstantSDNode>(Op.
getOperand(1))->getZExtValue();
1331 case Intrinsic::s390_tbegin:
1336 case Intrinsic::s390_tbegin_nofloat:
1341 case Intrinsic::s390_tend:
1355 unsigned Id = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
1357 case Intrinsic::s390_vpkshs:
1358 case Intrinsic::s390_vpksfs:
1359 case Intrinsic::s390_vpksgs:
1364 case Intrinsic::s390_vpklshs:
1365 case Intrinsic::s390_vpklsfs:
1366 case Intrinsic::s390_vpklsgs:
1371 case Intrinsic::s390_vceqbs:
1372 case Intrinsic::s390_vceqhs:
1373 case Intrinsic::s390_vceqfs:
1374 case Intrinsic::s390_vceqgs:
1379 case Intrinsic::s390_vchbs:
1380 case Intrinsic::s390_vchhs:
1381 case Intrinsic::s390_vchfs:
1382 case Intrinsic::s390_vchgs:
1387 case Intrinsic::s390_vchlbs:
1388 case Intrinsic::s390_vchlhs:
1389 case Intrinsic::s390_vchlfs:
1390 case Intrinsic::s390_vchlgs:
1395 case Intrinsic::s390_vtm:
1400 case Intrinsic::s390_vfaebs:
1401 case Intrinsic::s390_vfaehs:
1402 case Intrinsic::s390_vfaefs:
1407 case Intrinsic::s390_vfaezbs:
1408 case Intrinsic::s390_vfaezhs:
1409 case Intrinsic::s390_vfaezfs:
1414 case Intrinsic::s390_vfeebs:
1415 case Intrinsic::s390_vfeehs:
1416 case Intrinsic::s390_vfeefs:
1421 case Intrinsic::s390_vfeezbs:
1422 case Intrinsic::s390_vfeezhs:
1423 case Intrinsic::s390_vfeezfs:
1428 case Intrinsic::s390_vfenebs:
1429 case Intrinsic::s390_vfenehs:
1430 case Intrinsic::s390_vfenefs:
1435 case Intrinsic::s390_vfenezbs:
1436 case Intrinsic::s390_vfenezhs:
1437 case Intrinsic::s390_vfenezfs:
1442 case Intrinsic::s390_vistrbs:
1443 case Intrinsic::s390_vistrhs:
1444 case Intrinsic::s390_vistrfs:
1449 case Intrinsic::s390_vstrcbs:
1450 case Intrinsic::s390_vstrchs:
1451 case Intrinsic::s390_vstrcfs:
1456 case Intrinsic::s390_vstrczbs:
1457 case Intrinsic::s390_vstrczhs:
1458 case Intrinsic::s390_vstrczfs:
1463 case Intrinsic::s390_vfcedbs:
1468 case Intrinsic::s390_vfchdbs:
1473 case Intrinsic::s390_vfchedbs:
1478 case Intrinsic::s390_vftcidb:
1483 case Intrinsic::s390_tdc:
1501 for (
unsigned I = 2;
I < NumOps; ++
I)
1520 for (
unsigned I = 1;
I < NumOps; ++
I)
1537 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
1538 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
1539 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
1576 uint64_t TopBit = uint64_t(1) << 31;
1581 if (CCMask == (CCValid & (SystemZ::CCMASK_0
1589 | SystemZ::CCMASK_3)))
1601 if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
1612 if (CCMask == (CCValid & (SystemZ::CCMASK_0
1614 | SystemZ::CCMASK_3)))
1616 if (CCMask == (CCValid & (SystemZ::CCMASK_0
1618 | SystemZ::CCMASK_3)))
1635 int64_t
Value = ConstOp1->getSExtValue();
1641 C.Op1 = DAG.
getConstant(0, DL, C.Op1.getValueType());
1651 if (!C.Op0.hasOneUse() ||
1657 auto *
Load = cast<LoadSDNode>(C.Op0);
1658 unsigned NumBits =
Load->getMemoryVT().getStoreSizeInBits();
1659 if (NumBits != 8 && NumBits != 16)
1664 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1665 uint64_t
Value = ConstOp1->getZExtValue();
1666 uint64_t
Mask = (1 << NumBits) - 1;
1669 int64_t SignedValue = ConstOp1->getSExtValue();
1670 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
1676 }
else if (NumBits == 8) {
1702 if (C.Op0.getValueType() !=
MVT::i32 ||
1703 Load->getExtensionType() != ExtType)
1705 Load->getBasePtr(),
Load->getPointerInfo(),
1706 Load->getMemoryVT(),
Load->getAlignment(),
1707 Load->getMemOperand()->getFlags());
1710 if (C.Op1.getValueType() !=
MVT::i32 ||
1711 Value != ConstOp1->getZExtValue())
1724 switch (
Load->getExtensionType()) {
1747 if (isa<ConstantFPSDNode>(C.Op1))
1753 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
1782 unsigned Opcode0 = C.Op0.getOpcode();
1790 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
1812 for (
auto I = C.Op0->use_begin(),
E = C.Op0->use_end();
I !=
E; ++
I) {
1831 if (C1 && C1->isZero()) {
1832 for (
auto I = C.Op0->use_begin(),
E = C.Op0->use_end();
I !=
E; ++
I) {
1851 if (C.Op0.getOpcode() ==
ISD::SHL &&
1852 C.Op0.getValueType() ==
MVT::i64 &&
1854 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1856 if (C1 && C1->getZExtValue() == 32) {
1857 SDValue ShlOp0 = C.Op0.getOperand(0);
1877 C.Op0.getOperand(0).getOpcode() ==
ISD::LOAD &&
1879 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
1880 auto *
L = cast<LoadSDNode>(C.Op0.getOperand(0));
1881 if (
L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
1882 unsigned Type =
L->getExtensionType();
1885 C.Op0 = C.Op0.getOperand(0);
1886 C.Op1 = DAG.
getConstant(0, DL, C.Op0.getValueType());
1914 uint64_t
Mask, uint64_t CmpVal,
1915 unsigned ICmpType) {
1916 assert(Mask != 0 &&
"ANDs with zero should have been removed by now");
1925 uint64_t
High = uint64_t(1) << HighShift;
1939 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
1945 if (EffectivelyUnsigned && CmpVal < Low) {
1953 if (CmpVal == Mask) {
1959 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
1965 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
1973 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
1979 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
1988 if (Mask == Low + High) {
2017 if (C.Op0.getOpcode() ==
ISD::AND) {
2018 NewC.Op0 = C.Op0.getOperand(0);
2019 NewC.Op1 = C.Op0.getOperand(1);
2028 if (NewC.Op0.getValueType() !=
MVT::i64 ||
2036 if (CmpVal == uint64_t(-1))
2043 MaskVal = -(CmpVal & -CmpVal);
2051 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2052 unsigned NewCCMask, ShiftVal;
2054 NewC.Op0.getOpcode() ==
ISD::SHL &&
2057 MaskVal >> ShiftVal,
2060 NewC.Op0 = NewC.Op0.getOperand(0);
2061 MaskVal >>= ShiftVal;
2063 NewC.Op0.getOpcode() ==
ISD::SRL &&
2066 MaskVal << ShiftVal,
2069 NewC.Op0 = NewC.Op0.getOperand(0);
2070 MaskVal <<= ShiftVal;
2084 C.Op1 = DAG.
getConstant(MaskVal, DL, C.Op0.getValueType());
2086 C.CCMask = NewCCMask;
2098 C.CCValid = CCValid;
2101 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2104 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2108 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2111 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2115 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2118 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2121 C.CCMask &= CCValid;
2129 uint64_t
Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2130 unsigned Opcode, CCValid;
2140 Comparison
C(CmpOp0, CmpOp1);
2142 if (C.Op0.getValueType().isFloatingPoint()) {
2181 if (!C.Op1.getNode()) {
2183 switch (C.Op0.getOpcode()) {
2228 unsigned Extend,
unsigned Opcode,
SDValue Op0,
2242 unsigned CCValid,
unsigned CCMask) {
2246 if (Conversion.XORValue)
2250 if (Conversion.AddValue)
2257 if (Conversion.Bit != 31)
2312 int Mask[] = { Start, -1, Start + 1, -1 };
2332 return DAG.
getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2341 bool Invert =
false;
2348 assert(IsFP &&
"Unexpected integer comparison");
2359 assert(IsFP &&
"Unexpected integer comparison");
2371 Cmp =
getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
2375 Cmp =
getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
2400 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2402 return emitSETCC(DAG, DL, Glue,
C.CCValid,
C.CCMask);
2412 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2424 cast<ConstantSDNode>(Neg.
getOperand(0))->getZExtValue() == 0 &&
2428 Pos.getOperand(0) == CmpOp)));
2450 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2459 cast<ConstantSDNode>(
C.Op1)->getZExtValue() == 0) {
2472 if (TrueC && FalseC) {
2474 int64_t FalseVal = FalseC->getSExtValue();
2475 if ((TrueVal == -1 && FalseVal == 0) || (TrueVal == 0 && FalseVal == -1)) {
2478 C.CCMask ^=
C.CCValid;
2509 uint64_t Anchor = Offset & ~uint64_t(0xfff);
2515 if (Offset != 0 && (Offset & 1) == 0) {
2547 Chain = DAG.
getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
2549 Chain = DAG.
getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
2568 assert(Mask &&
"Missing call preserved mask for calling convention");
2576 Chain = DAG.
getNode(Opcode, DL, NodeTys, Ops);
2583 SDValue SystemZTargetLowering::lowerThreadPointer(
const SDLoc &DL,
2611 SDValue TP = lowerThreadPointer(DL, DAG);
2737 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
2743 if (!BackChainIdx) {
2746 FI->setFramePointerSaveIndex(BackChainIdx);
2768 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
2777 unsigned LinkReg = MF.
addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
2791 if (
auto *LoadN = dyn_cast<LoadSDNode>(In))
2792 return DAG.
getLoad(ResVT, DL, LoadN->getChain(), LoadN->getBasePtr(),
2793 LoadN->getMemOperand());
2839 const unsigned NumFields = 4;
2849 unsigned Offset = 0;
2850 for (
unsigned I = 0;
I < NumFields; ++
I) {
2855 MemOps[
I] = DAG.
getStore(Chain, DL, Fields[
I], FieldAddr,
2867 const Value *DstSV = cast<SrcValueSDNode>(Op.
getOperand(3))->getValue();
2868 const Value *SrcSV = cast<SrcValueSDNode>(Op.
getOperand(4))->getValue();
2877 SDValue SystemZTargetLowering::
2881 bool RealignOpt = !MF.
getFunction()-> hasFnAttribute(
"no-realign-stack");
2891 uint64_t AlignVal = (RealignOpt ?
2895 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
2896 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
2910 if (ExtraAlignSpace)
2927 if (RequiredAlign > StackAlign) {
2939 SDValue Ops[2] = { Result, Chain };
2943 SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
2983 LL, RL, Ops[1], Ops[0]);
3034 Op0, Op1, Ops[1], Ops[0]);
3061 APInt KnownZero[2], KnownOne[2];
3070 if ((Masks[0] >> 32) == 0xffffffff &&
uint32_t(Masks[1]) == 0xffffffff)
3072 else if ((Masks[1] >> 32) == 0xffffffff &&
uint32_t(Masks[0]) == 0xffffffff)
3087 int64_t
Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
3097 uint64_t Mask = cast<ConstantSDNode>(HighOp.
getOperand(1))->getZExtValue();
3153 APInt KnownZero, KnownOne;
3155 unsigned NumSignificantBits = (~KnownZero).getActiveBits();
3156 if (NumSignificantBits == 0)
3161 int64_t BitSize = (int64_t)1 <<
Log2_32_Ceil(NumSignificantBits);
3162 BitSize =
std::min(BitSize, OrigBitSize);
3171 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3173 if (BitSize != OrigBitSize)
3175 DAG.
getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
3191 cast<ConstantSDNode>(Op.
getOperand(1))->getZExtValue());
3193 cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue());
3211 auto *Node = cast<AtomicSDNode>(Op.
getNode());
3213 Node->getChain(), Node->getBasePtr(),
3214 Node->getMemoryVT(), Node->getMemOperand());
3221 auto *Node = cast<AtomicSDNode>(Op.
getNode());
3223 Node->getBasePtr(), Node->getMemoryVT(),
3224 Node->getMemOperand());
3233 unsigned Opcode)
const {
3234 auto *Node = cast<AtomicSDNode>(Op.
getNode());
3237 EVT NarrowVT = Node->getMemoryVT();
3239 if (NarrowVT == WideVT)
3242 int64_t BitSize = NarrowVT.getSizeInBits();
3243 SDValue ChainIn = Node->getChain();
3244 SDValue Addr = Node->getBasePtr();
3245 SDValue Src2 = Node->getVal();
3252 if (
auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3287 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
3307 auto *Node = cast<AtomicSDNode>(Op.
getNode());
3308 EVT MemVT = Node->getMemoryVT();
3312 SDValue Src2 = Node->getVal();
3316 if (
auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3319 int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
3329 Node->getChain(), Node->getBasePtr(), NegSrc2,
3330 Node->getMemOperand());
3343 auto *Node = cast<AtomicSDNode>(Op.
getNode());
3346 EVT NarrowVT = Node->getMemoryVT();
3348 if (NarrowVT == WideVT)
3351 int64_t BitSize = NarrowVT.getSizeInBits();
3377 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
3378 NegBitShift, DAG.
getConstant(BitSize, DL, WideVT) };
3380 VTList, Ops, NarrowVT, MMO);
3403 if (StoreBackchain) {
3408 Chain = DAG.
getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
3424 bool IsWrite = cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue();
3426 auto *Node = cast<MemIntrinsicSDNode>(Op.
getNode());
3434 Node->getMemoryVT(), Node->getMemOperand());
3448 SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(
SDValue Op,
3450 unsigned Opcode, CCValid;
3463 SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(
SDValue Op,
3465 unsigned Opcode, CCValid;
3476 unsigned Id = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
3478 case Intrinsic::thread_pointer:
3479 return lowerThreadPointer(
SDLoc(Op), DAG);
3481 case Intrinsic::s390_vpdi:
3485 case Intrinsic::s390_vperm:
3489 case Intrinsic::s390_vuphb:
3490 case Intrinsic::s390_vuphh:
3491 case Intrinsic::s390_vuphf:
3495 case Intrinsic::s390_vuplhb:
3496 case Intrinsic::s390_vuplhh:
3497 case Intrinsic::s390_vuplhf:
3501 case Intrinsic::s390_vuplb:
3502 case Intrinsic::s390_vuplhw:
3503 case Intrinsic::s390_vuplf:
3507 case Intrinsic::s390_vupllb:
3508 case Intrinsic::s390_vupllh:
3509 case Intrinsic::s390_vupllf:
3513 case Intrinsic::s390_vsumb:
3514 case Intrinsic::s390_vsumh:
3515 case Intrinsic::s390_vsumgh:
3516 case Intrinsic::s390_vsumgf:
3517 case Intrinsic::s390_vsumqf:
3518 case Intrinsic::s390_vsumqg:
3541 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
3544 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
3547 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
3550 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
3553 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
3556 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
3559 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
3562 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
3565 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
3568 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
3571 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
3574 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
3577 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
3591 OpNo0 = OpNo1 = OpNos[1];
3592 }
else if (OpNos[1] < 0) {
3593 OpNo0 = OpNo1 = OpNos[0];
3611 unsigned &OpNo0,
unsigned &OpNo1) {
3612 int OpNos[] = { -1, -1 };
3619 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
3625 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3627 OpNos[ModelOpNo] = RealOpNo;
3635 unsigned &OpNo0,
unsigned &OpNo1) {
3636 for (
auto &
P : PermuteForms)
3652 int Elt = Bytes[From];
3655 Transform[From] = -1;
3657 while (P.Bytes[To] != Elt) {
3659 if (To == SystemZ::VectorBytes)
3662 Transform[From] = To;
3671 for (
auto &
P : PermuteForms)
3684 Bytes.
resize(NumElements * BytesPerElement, -1);
3685 for (
unsigned I = 0; I < NumElements; ++
I) {
3688 for (
unsigned J = 0; J < BytesPerElement; ++J)
3689 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
3698 unsigned BytesPerElement,
int &Base) {
3700 for (
unsigned I = 0; I < BytesPerElement; ++
I) {
3701 if (Bytes[Start + I] >= 0) {
3702 unsigned Elem = Bytes[Start +
I];
3706 if (
unsigned(Base) % Bytes.
size() + BytesPerElement > Bytes.
size())
3708 }
else if (
unsigned(Base) != Elem -
I)
3721 unsigned &StartIndex,
unsigned &OpNo0,
3723 int OpNos[] = { -1, -1 };
3725 for (
unsigned I = 0; I < 16; ++
I) {
3726 int Index = Bytes[
I];
3732 Shift = ExpectedShift;
3733 else if (Shift != ExpectedShift)
3737 if (OpNos[ModelOpNo] == 1 - RealOpNo)
3739 OpNos[ModelOpNo] = RealOpNo;
3769 Op = DAG.
getNode(P.Opcode, DL, InVT, Op0, Op1);
3780 for (
unsigned I = 0; I < 2; ++
I)
3784 unsigned StartIndex, OpNo0, OpNo1;
3802 struct GeneralShuffle {
3803 GeneralShuffle(
EVT vt) : VT(vt) {}
3822 void GeneralShuffle::addUndef() {
3824 for (
unsigned I = 0; I < BytesPerElement; ++
I)
3825 Bytes.push_back(-1);
3840 assert(FromBytesPerElement >= BytesPerElement &&
3841 "Invalid EXTRACT_VECTOR_ELT");
3843 (FromBytesPerElement - BytesPerElement));
3872 for (; OpNo < Ops.size(); ++OpNo)
3873 if (Ops[OpNo] == Op)
3875 if (OpNo == Ops.size())
3880 for (
unsigned I = 0; I < BytesPerElement; ++
I)
3881 Bytes.push_back(Base + I);
3888 if (Ops.size() == 0)
3892 if (Ops.size() == 1)
3904 unsigned Stride = 1;
3905 for (; Stride * 2 < Ops.size(); Stride *= 2) {
3906 for (
unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
3907 SDValue SubOps[] = { Ops[
I], Ops[I + Stride] };
3916 else if (OpNo == I + Stride)
3917 NewBytes[J] = SystemZ::VectorBytes + Byte;
3927 if (NewBytes[J] >= 0) {
3928 assert(
unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
3929 "Invalid double permute");
3930 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
3932 assert(NewBytesMap[J] < 0 &&
"Invalid double permute");
3938 if (NewBytes[J] >= 0)
3939 Bytes[J] = I * SystemZ::VectorBytes + J;
3946 Ops[1] = Ops[Stride];
3948 if (Bytes[I] >=
int(SystemZ::VectorBytes))
3949 Bytes[
I] -= (Stride - 1) * SystemZ::VectorBytes;
3954 unsigned OpNo0, OpNo1;
4033 Value = dyn_cast<ConstantSDNode>(Op)->getZExtValue();
4035 Value = (dyn_cast<ConstantFPSDNode>(Op)->getValueAPF().bitcastToAPInt()
4039 for (
unsigned J = 0; J < BytesPerElement; ++J) {
4040 uint64_t Byte = (Value >> (J * 8)) & 0xff;
4042 Mask |= 1ULL << ((
E - I - 1) * BytesPerElement + J);
4058 const SDLoc &DL,
EVT VT, uint64_t Value,
4059 unsigned BitsPerElement) {
4061 int64_t SignedValue =
SignExtend64(Value, BitsPerElement);
4072 unsigned Start,
End;
4073 if (TII->
isRxSBGMask(Value, BitsPerElement, Start, End)) {
4078 Start -= 64 - BitsPerElement;
4079 End -= 64 - BitsPerElement;
4103 GeneralShuffle GS(VT);
4105 bool FoundOne =
false;
4106 for (
unsigned I = 0; I < NumElements; ++
I) {
4112 unsigned Elem = cast<ConstantSDNode>(Op.
getOperand(1))->getZExtValue();
4128 if (!ResidueOps.
empty()) {
4129 while (ResidueOps.
size() < NumElements)
4131 for (
auto &Op : GS.Ops) {
4138 return GS.getNode(DAG,
SDLoc(BVN));
4146 unsigned int NumElements = Elems.
size();
4147 unsigned int Count = 0;
4148 for (
auto Elem : Elems) {
4149 if (!Elem.isUndef()) {
4152 else if (Elem != Single) {
4177 return joinDwords(DAG, DL, Elems[0], Elems[1]);
4212 unsigned NumConstants = 0;
4213 for (
unsigned I = 0; I < NumElements; ++
I) {
4218 Constants[
I] = Elem;
4226 if (NumConstants > 0) {
4227 for (
unsigned I = 0; I < NumElements; ++
I)
4228 if (!Constants[I].getNode())
4229 Constants[I] = DAG.
getUNDEF(Elems[I].getValueType());
4236 unsigned I1 = NumElements / 2 - 1;
4237 unsigned I2 = NumElements - 1;
4238 bool Def1 = !Elems[I1].
isUndef();
4239 bool Def2 = !Elems[I2].isUndef();
4241 SDValue Elem1 = Elems[Def1 ? I1 : I2];
4242 SDValue Elem2 = Elems[Def2 ? I2 : I1];
4252 for (
unsigned I = 0; I < NumElements; ++
I)
4253 if (!Done[I] && !Elems[I].isUndef())
4263 auto *BVN = cast<BuildVectorSDNode>(Op.
getNode());
4267 if (BVN->isConstant()) {
4279 APInt SplatBits, SplatUndef;
4280 unsigned SplatBitSize;
4282 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4284 SplatBitSize <= 64) {
4291 uint64_t
Lower = (SplatUndefZ
4293 uint64_t
Upper = (SplatUndefZ
4294 & ~((uint64_t(1) <<
findLastSet(SplatBitsZ)) - 1));
4295 uint64_t Value = SplatBitsZ | Upper |
Lower;
4304 uint64_t Middle = SplatUndefZ & ~Upper & ~
Lower;
4305 Value = SplatBitsZ | Middle;
4326 for (
unsigned I = 0; I < NumElements; ++
I)
4333 auto *VSN = cast<ShuffleVectorSDNode>(Op.
getNode());
4338 if (VSN->isSplat()) {
4340 unsigned Index = VSN->getSplatIndex();
4342 "Splat index should be defined and in first operand");
4352 GeneralShuffle
GS(VT);
4353 for (
unsigned I = 0; I < NumElements; ++
I) {
4354 int Elt = VSN->getMaskElt(I);
4361 return GS.getNode(DAG,
SDLoc(VSN));
4373 SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(
SDValue Op,
4389 uint64_t Index =
dyn_cast<ConstantSDNode>(Op2)->getZExtValue();
4405 SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(
SDValue Op,
4415 if (
auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
4416 uint64_t Index = CIndexN->getZExtValue();
4432 unsigned UnpackHigh)
const {
4442 PackedOp = DAG.
getNode(UnpackHigh,
SDLoc(PackedOp), OutVT, PackedOp);
4443 }
while (FromBits != ToBits);
4448 unsigned ByScalar)
const {
4457 if (
auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
4458 APInt SplatBits, SplatUndef;
4459 unsigned SplatBitSize;
4463 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4464 ElemBitSize,
true) &&
4465 SplatBitSize == ElemBitSize) {
4468 return DAG.
getNode(ByScalar, DL, VT, Op0, Shift);
4472 SDValue Splat = BVN->getSplatValue(&UndefElements);
4477 return DAG.
getNode(ByScalar, DL, VT, Op0, Shift);
4483 if (
auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
4484 if (VSN->isSplat()) {
4486 unsigned Index = VSN->getSplatIndex();
4488 "Splat index should be defined and in first operand");
4495 return DAG.
getNode(ByScalar, DL, VT, Op0, Shift);
4508 return lowerFRAMEADDR(Op, DAG);
4510 return lowerRETURNADDR(Op, DAG);
4512 return lowerBR_CC(Op, DAG);
4514 return lowerSELECT_CC(Op, DAG);
4516 return lowerSETCC(Op, DAG);
4518 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
4520 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
4522 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
4524 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
4526 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
4528 return lowerBITCAST(Op, DAG);
4530 return lowerVASTART(Op, DAG);
4532 return lowerVACOPY(Op, DAG);
4534 return lowerDYNAMIC_STACKALLOC(Op, DAG);
4536 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
4538 return lowerSMUL_LOHI(Op, DAG);
4540 return lowerUMUL_LOHI(Op, DAG);
4542 return lowerSDIVREM(Op, DAG);
4544 return lowerUDIVREM(Op, DAG);
4546 return lowerOR(Op, DAG);
4548 return lowerCTPOP(Op, DAG);
4550 return lowerATOMIC_FENCE(Op, DAG);
4554 return lowerATOMIC_STORE(Op, DAG);
4556 return lowerATOMIC_LOAD(Op, DAG);
4560 return lowerATOMIC_LOAD_SUB(Op, DAG);
4578 return lowerATOMIC_CMP_SWAP(Op, DAG);
4580 return lowerSTACKSAVE(Op, DAG);
4582 return lowerSTACKRESTORE(Op, DAG);
4584 return lowerPREFETCH(Op, DAG);
4586 return lowerINTRINSIC_W_CHAIN(Op, DAG);
4588 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
4590 return lowerBUILD_VECTOR(Op, DAG);
4592 return lowerVECTOR_SHUFFLE(Op, DAG);
4594 return lowerSCALAR_TO_VECTOR(Op, DAG);
4596 return lowerINSERT_VECTOR_ELT(Op, DAG);
4598 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
4615 #define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
4735 SDValue SystemZTargetLowering::combineExtract(
const SDLoc &DL,
EVT ResVT,
4738 DAGCombinerInfo &DCI,
4759 BytesPerElement, First))
4766 if (Byte % BytesPerElement != 0)
4769 Index = Byte / BytesPerElement;
4778 if (OpBytesPerElement < BytesPerElement)
4782 unsigned End = (Index + 1) * BytesPerElement;
4783 if (End % OpBytesPerElement != 0)
4786 Op = Op.
getOperand(End / OpBytesPerElement - 1);
4790 DCI.AddToWorklist(Op.getNode());
4795 DCI.AddToWorklist(Op.getNode());
4805 EVT ExtVT = Op.getValueType();
4806 EVT OpVT = Op.getOperand(0).getValueType();
4809 unsigned Byte = Index * BytesPerElement;
4810 unsigned SubByte = Byte % ExtBytesPerElement;
4811 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
4812 if (SubByte < MinSubByte ||
4813 SubByte + BytesPerElement > ExtBytesPerElement)
4816 Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
4818 Byte += SubByte - MinSubByte;
4819 if (Byte % BytesPerElement != 0)
4821 Op = Op.getOperand(0);
4822 Index = Byte / BytesPerElement;
4828 if (Op.getValueType() != VecVT) {
4830 DCI.AddToWorklist(Op.getNode());
4840 SDValue SystemZTargetLowering::combineTruncateExtract(
4841 const SDLoc &DL,
EVT TruncVT,
SDValue Op, DAGCombinerInfo &DCI)
const {
4850 if (
auto *IndexN = dyn_cast<ConstantSDNode>(Op.
getOperand(1))) {
4853 if (BytesPerElement % TruncBytes == 0) {
4859 unsigned Scale = BytesPerElement / TruncBytes;
4860 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
4866 EVT ResVT = (TruncBytes < 4 ?
MVT::i32 : TruncVT);
4867 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI,
true);
4875 SDValue SystemZTargetLowering::combineSIGN_EXTEND(
4876 SDNode *
N, DAGCombinerInfo &DCI)
const {
4886 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() ==
ISD::SHL) {
4887 if (
auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
4889 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
4890 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
4893 Inner.getOperand(0));
4905 SDValue SystemZTargetLowering::combineMERGE(
4906 SDNode *N, DAGCombinerInfo &DCI)
const {
4914 cast<ConstantSDNode>(Op0.
getOperand(0))->getZExtValue() == 0) {
4922 if (ElemBytes <= 4) {
4927 SystemZ::VectorBytes / ElemBytes / 2);
4930 DCI.AddToWorklist(Op1.
getNode());
4933 DCI.AddToWorklist(Op.
getNode());
4940 SDValue SystemZTargetLowering::combineSTORE(
4941 SDNode *N, DAGCombinerInfo &DCI)
const {
4943 auto *SN = cast<StoreSDNode>(
N);
4945 EVT MemVT = SN->getMemoryVT();
4952 combineTruncateExtract(
SDLoc(N), MemVT, SN->getValue(), DCI)) {
4953 DCI.AddToWorklist(Value.getNode());
4957 SN->getBasePtr(), SN->getMemoryVT(),
4958 SN->getMemOperand());
4963 if (!SN->isVolatile() &&
4965 Op1.getNode()->hasOneUse() &&
4982 Ops, MemVT, SN->getMemOperand());
4987 SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
4988 SDNode *N, DAGCombinerInfo &DCI)
const {
4990 if (
auto *IndexN = dyn_cast<ConstantSDNode>(N->
getOperand(1))) {
4994 IndexN->getZExtValue(), DCI,
false);
4999 SDValue SystemZTargetLowering::combineJOIN_DWORDS(
5000 SDNode *N, DAGCombinerInfo &DCI)
const {
5009 SDValue SystemZTargetLowering::combineFP_ROUND(
5010 SDNode *N, DAGCombinerInfo &DCI)
const {
5024 cast<ConstantSDNode>(Op0.
getOperand(1))->getZExtValue() == 0) {
5026 for (
auto *U : Vec->
uses()) {
5030 U->getOperand(0) == Vec &&
5032 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
5039 DCI.AddToWorklist(VRound.
getNode());
5043 DCI.AddToWorklist(Extract1.
getNode());
5056 SDValue SystemZTargetLowering::combineBSWAP(
5057 SDNode *N, DAGCombinerInfo &DCI)
const {
5089 DCI.CombineTo(N, ResVal);
5101 SDValue SystemZTargetLowering::combineSHIFTROT(
5102 SDNode *N, DAGCombinerInfo &DCI)
const {
5117 auto *AndMask =
dyn_cast<ConstantSDNode>(AndMaskOp);
5121 auto AmtVal = AndMask->getZExtValue();
5124 if ((AmtVal & 0x3f) == 0x3f) {
5130 DCI.CombineTo(N1.
getNode(), AndOp);
5140 DCI.AddToWorklist(Replace.
getNode());
5148 }
else if (AmtVal >> 16 != 0) {
5151 auto NewMask = DAG.
getConstant(AndMask->getZExtValue() & 0x0000ffff,
5161 DCI.AddToWorklist(Replace.
getNode());
5178 case ISD::STORE:
return combineSTORE(N, DCI);
5182 case ISD::BSWAP:
return combineBSWAP(N, DCI);
5186 case ISD::ROTL:
return combineSHIFTROT(N, DCI);
5246 unsigned LOCROpcode)
const {
5259 BuildMI(*MBB, MI, DL, TII->get(LOCROpcode), DestReg)
5260 .addReg(FalseReg).
addReg(TrueReg)
5274 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5288 BuildMI(*MBB, MI, DL, TII->get(SystemZ::PHI), DestReg)
5289 .addReg(TrueReg).
addMBB(StartMBB)
5302 unsigned StoreOpcode,
5303 unsigned STOCOpcode,
5304 bool Invert)
const {
5324 BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
5343 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5352 BuildMI(MBB, DL, TII->get(StoreOpcode))
5369 unsigned BitSize,
bool Invert)
const {
5374 bool IsSubWord = (BitSize < 32);
5390 &SystemZ::GR32BitRegClass :
5391 &SystemZ::GR64BitRegClass);
5392 unsigned LOpcode = BitSize <= 32 ?
SystemZ::L : SystemZ::LG;
5393 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
5398 assert(LOpcode && CSOpcode &&
"Displacement out of range");
5403 unsigned NewVal = (BinOpcode || IsSubWord ?
5418 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
5431 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5432 .addReg(OrigVal).
addMBB(StartMBB)
5435 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
5440 BuildMI(MBB, DL, TII->get(BinOpcode), Tmp)
5444 BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
5445 .addReg(Tmp).
addImm(-1U << (32 - BitSize));
5450 BuildMI(MBB, DL, TII->get(SystemZ::LCGR), Tmp2).addReg(Tmp);
5451 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), RotatedNewVal)
5452 .addReg(Tmp2).
addImm(-1);
5454 }
else if (BinOpcode)
5456 BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
5461 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedNewVal)
5465 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
5467 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
5469 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5486 unsigned KeepOldMask,
unsigned BitSize)
const {
5491 bool IsSubWord = (BitSize < 32);
5506 &SystemZ::GR32BitRegClass :
5507 &SystemZ::GR64BitRegClass);
5508 unsigned LOpcode = BitSize <= 32 ?
SystemZ::L : SystemZ::LG;
5509 unsigned CSOpcode = BitSize <= 32 ? SystemZ::CS : SystemZ::CSG;
5514 assert(LOpcode && CSOpcode &&
"Displacement out of range");
5536 BuildMI(MBB, DL, TII->get(LOpcode), OrigVal)
5546 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5547 .addReg(OrigVal).
addMBB(StartMBB)
5550 BuildMI(MBB, DL, TII->get(SystemZ::RLL), RotatedOldVal)
5552 BuildMI(MBB, DL, TII->get(CompareOpcode))
5553 .addReg(RotatedOldVal).
addReg(Src2);
5554 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5564 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RotatedAltVal)
5565 .addReg(RotatedOldVal).
addReg(Src2)
5577 BuildMI(MBB, DL, TII->get(SystemZ::PHI), RotatedNewVal)
5578 .addReg(RotatedOldVal).
addMBB(LoopMBB)
5581 BuildMI(MBB, DL, TII->get(SystemZ::RLL), NewVal)
5583 BuildMI(MBB, DL, TII->get(CSOpcode), Dest)
5585 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5597 SystemZTargetLowering::emitAtomicCmpSwapW(
MachineInstr &MI,
5621 assert(LOpcode && CSOpcode &&
"Displacement out of range");
5644 BuildMI(MBB, DL, TII->get(LOpcode), OrigOldVal)
5663 BuildMI(MBB, DL, TII->get(SystemZ::PHI), OldVal)
5664 .addReg(OrigOldVal).
addMBB(StartMBB)
5666 BuildMI(MBB, DL, TII->get(SystemZ::PHI), CmpVal)
5667 .addReg(OrigCmpVal).
addMBB(StartMBB)
5669 BuildMI(MBB, DL, TII->get(SystemZ::PHI), SwapVal)
5670 .addReg(OrigSwapVal).
addMBB(StartMBB)
5672 BuildMI(MBB, DL, TII->get(SystemZ::RLL), Dest)
5674 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetryCmpVal)
5676 BuildMI(MBB, DL, TII->get(SystemZ::CR))
5677 .addReg(Dest).
addReg(RetryCmpVal);
5678 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5694 BuildMI(MBB, DL, TII->get(SystemZ::RISBG32), RetrySwapVal)
5696 BuildMI(MBB, DL, TII->get(SystemZ::RLL), StoreVal)
5697 .addReg(RetrySwapVal).
addReg(NegBitShift).
addImm(-BitSize);
5698 BuildMI(MBB, DL, TII->get(CSOpcode), RetryOldVal)
5700 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5727 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
5732 BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
5734 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
5735 .addReg(In128).
addReg(Zero64).
addImm(SystemZ::subreg_h64);
5738 BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
5769 uint64_t StartSrcReg =
forceReg(MI, SrcBase, TII);
5770 uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
5775 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
5778 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
5781 RC = &SystemZ::GR64BitRegClass;
5808 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisDestReg)
5809 .addReg(StartDestReg).
addMBB(StartMBB)
5811 if (!HaveSingleBase)
5812 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisSrcReg)
5813 .addReg(StartSrcReg).
addMBB(StartMBB)
5815 BuildMI(MBB, DL, TII->get(SystemZ::PHI), ThisCountReg)
5816 .addReg(StartCountReg).
addMBB(StartMBB)
5819 BuildMI(MBB, DL, TII->get(SystemZ::PFD))
5822 BuildMI(MBB, DL, TII->get(Opcode))
5826 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5844 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextDestReg)
5846 if (!HaveSingleBase)
5847 BuildMI(MBB, DL, TII->get(SystemZ::LA), NextSrcReg)
5849 BuildMI(MBB, DL, TII->get(SystemZ::AGHI), NextCountReg)
5850 .addReg(ThisCountReg).
addImm(-1);
5851 BuildMI(MBB, DL, TII->get(SystemZ::CGHI))
5852 .addReg(NextCountReg).
addImm(0);
5853 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5865 while (Length > 0) {
5866 uint64_t ThisLength =
std::min(Length, uint64_t(256));
5869 if (!isUInt<12>(DestDisp)) {
5878 if (!isUInt<12>(SrcDisp)) {
5887 BuildMI(*MBB, MI, DL, TII->get(Opcode))
5888 .
addOperand(DestBase).addImm(DestDisp).addImm(ThisLength)
5889 .addOperand(SrcBase).addImm(SrcDisp);
5890 DestDisp += ThisLength;
5891 SrcDisp += ThisLength;
5892 Length -= ThisLength;
5895 if (EndMBB && Length > 0) {
5897 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5954 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This1Reg)
5955 .addReg(Start1Reg).
addMBB(StartMBB)
5957 BuildMI(MBB, DL, TII->get(SystemZ::PHI), This2Reg)
5958 .addReg(Start2Reg).
addMBB(StartMBB)
5960 BuildMI(MBB, DL, TII->get(TargetOpcode::COPY), SystemZ::R0L).addReg(CharReg);
5961 BuildMI(MBB, DL, TII->get(Opcode))
5964 BuildMI(MBB, DL, TII->get(SystemZ::BRC))
5978 bool NoFloat)
const {
5989 static const unsigned GPRControlBit[16] = {
5990 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
5991 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
5993 Control |= GPRControlBit[15];
5995 Control |= GPRControlBit[11];
5999 for (
int I = 0; I < 16; I++) {
6000 if ((Control & GPRControlBit[I]) == 0) {
6007 if (!NoFloat && (Control & 4) != 0) {
6009 for (
int I = 0; I < 32; I++) {
6014 for (
int I = 0; I < 16; I++) {
6040 BuildMI(*MBB, MI, DL, TII->get(Opcode), DstReg)
6050 case SystemZ::Select32Mux:
6051 return emitSelect(MI, MBB,
6053 case SystemZ::Select32:
6054 return emitSelect(MI, MBB, SystemZ::LOCR);
6055 case SystemZ::Select64:
6056 return emitSelect(MI, MBB, SystemZ::LOCGR);
6057 case SystemZ::SelectF32:
6058 case SystemZ::SelectF64:
6059 case SystemZ::SelectF128:
6060 return emitSelect(MI, MBB, 0);
6062 case SystemZ::CondStore8Mux:
6063 return emitCondStore(MI, MBB, SystemZ::STCMux, 0,
false);
6064 case SystemZ::CondStore8MuxInv:
6065 return emitCondStore(MI, MBB, SystemZ::STCMux, 0,
true);
6066 case SystemZ::CondStore16Mux:
6067 return emitCondStore(MI, MBB, SystemZ::STHMux, 0,
false);
6068 case SystemZ::CondStore16MuxInv:
6069 return emitCondStore(MI, MBB, SystemZ::STHMux, 0,
true);
6070 case SystemZ::CondStore32Mux:
6071 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux,
false);
6072 case SystemZ::CondStore32MuxInv:
6073 return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux,
true);
6074 case SystemZ::CondStore8:
6075 return emitCondStore(MI, MBB, SystemZ::STC, 0,
false);
6076 case SystemZ::CondStore8Inv:
6077 return emitCondStore(MI, MBB, SystemZ::STC, 0,
true);
6078 case SystemZ::CondStore16:
6079 return emitCondStore(MI, MBB, SystemZ::STH, 0,
false);
6080 case SystemZ::CondStore16Inv:
6081 return emitCondStore(MI, MBB, SystemZ::STH, 0,
true);
6082 case SystemZ::CondStore32:
6083 return emitCondStore(MI, MBB,
SystemZ::ST, SystemZ::STOC,
false);
6084 case SystemZ::CondStore32Inv:
6085 return emitCondStore(MI, MBB,
SystemZ::ST, SystemZ::STOC,
true);
6086 case SystemZ::CondStore64:
6087 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG,
false);
6088 case SystemZ::CondStore64Inv:
6089 return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG,
true);
6090 case SystemZ::CondStoreF32:
6091 return emitCondStore(MI, MBB, SystemZ::STE, 0,
false);
6092 case SystemZ::CondStoreF32Inv:
6093 return emitCondStore(MI, MBB, SystemZ::STE, 0,
true);
6094 case SystemZ::CondStoreF64:
6095 return emitCondStore(MI, MBB, SystemZ::STD, 0,
false);
6096 case SystemZ::CondStoreF64Inv:
6097 return emitCondStore(MI, MBB, SystemZ::STD, 0,
true);
6099 case SystemZ::AEXT128_64:
6100 return emitExt128(MI, MBB,
false, SystemZ::subreg_l64);
6101 case SystemZ::ZEXT128_32:
6102 return emitExt128(MI, MBB,
true, SystemZ::subreg_l32);
6103 case SystemZ::ZEXT128_64:
6104 return emitExt128(MI, MBB,
true, SystemZ::subreg_l64);
6107 return emitAtomicLoadBinary(MI, MBB, 0, 0);
6108 case SystemZ::ATOMIC_SWAP_32:
6109 return emitAtomicLoadBinary(MI, MBB, 0, 32);
6110 case SystemZ::ATOMIC_SWAP_64:
6111 return emitAtomicLoadBinary(MI, MBB, 0, 64);
6113 case SystemZ::ATOMIC_LOADW_AR:
6114 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 0);
6115 case SystemZ::ATOMIC_LOADW_AFI:
6116 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 0);
6117 case SystemZ::ATOMIC_LOAD_AR:
6118 return emitAtomicLoadBinary(MI, MBB, SystemZ::AR, 32);
6119 case SystemZ::ATOMIC_LOAD_AHI:
6120 return emitAtomicLoadBinary(MI, MBB, SystemZ::AHI, 32);
6121 case SystemZ::ATOMIC_LOAD_AFI:
6122 return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI, 32);
6123 case SystemZ::ATOMIC_LOAD_AGR:
6124 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGR, 64);
6125 case SystemZ::ATOMIC_LOAD_AGHI:
6126 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGHI, 64);
6127 case SystemZ::ATOMIC_LOAD_AGFI:
6128 return emitAtomicLoadBinary(MI, MBB, SystemZ::AGFI, 64);
6130 case SystemZ::ATOMIC_LOADW_SR:
6131 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 0);
6132 case SystemZ::ATOMIC_LOAD_SR:
6133 return emitAtomicLoadBinary(MI, MBB, SystemZ::SR, 32);
6134 case SystemZ::ATOMIC_LOAD_SGR:
6135 return emitAtomicLoadBinary(MI, MBB, SystemZ::SGR, 64);
6137 case SystemZ::ATOMIC_LOADW_NR:
6138 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0);
6139 case SystemZ::ATOMIC_LOADW_NILH:
6140 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0);
6141 case SystemZ::ATOMIC_LOAD_NR:
6142 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32);
6143 case SystemZ::ATOMIC_LOAD_NILL:
6144 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32);
6145 case SystemZ::ATOMIC_LOAD_NILH:
6146 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32);
6147 case SystemZ::ATOMIC_LOAD_NILF:
6148 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32);
6149 case SystemZ::ATOMIC_LOAD_NGR:
6150 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64);
6151 case SystemZ::ATOMIC_LOAD_NILL64:
6152 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64);
6153 case SystemZ::ATOMIC_LOAD_NILH64:
6154 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64);
6155 case SystemZ::ATOMIC_LOAD_NIHL64:
6156 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64);
6157 case SystemZ::ATOMIC_LOAD_NIHH64:
6158 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64);
6159 case SystemZ::ATOMIC_LOAD_NILF64:
6160 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64);
6161 case SystemZ::ATOMIC_LOAD_NIHF64:
6162 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64);
6165 return emitAtomicLoadBinary(MI, MBB,
SystemZ::OR, 0);
6166 case SystemZ::ATOMIC_LOADW_OILH:
6167 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 0);
6169 return emitAtomicLoadBinary(MI, MBB,
SystemZ::OR, 32);
6170 case SystemZ::ATOMIC_LOAD_OILL:
6171 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL, 32);
6172 case SystemZ::ATOMIC_LOAD_OILH:
6173 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH, 32);
6174 case SystemZ::ATOMIC_LOAD_OILF:
6175 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF, 32);
6176 case SystemZ::ATOMIC_LOAD_OGR:
6177 return emitAtomicLoadBinary(MI, MBB, SystemZ::OGR, 64);
6178 case SystemZ::ATOMIC_LOAD_OILL64:
6179 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILL64, 64);
6180 case SystemZ::ATOMIC_LOAD_OILH64:
6181 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH64, 64);
6182 case SystemZ::ATOMIC_LOAD_OIHL64:
6183 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHL64, 64);
6184 case SystemZ::ATOMIC_LOAD_OIHH64:
6185 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHH64, 64);
6186 case SystemZ::ATOMIC_LOAD_OILF64:
6187 return emitAtomicLoadBinary(MI, MBB, SystemZ::OILF64, 64);
6188 case SystemZ::ATOMIC_LOAD_OIHF64:
6189 return emitAtomicLoadBinary(MI, MBB, SystemZ::OIHF64, 64);
6191 case SystemZ::ATOMIC_LOADW_XR:
6192 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 0);
6193 case SystemZ::ATOMIC_LOADW_XILF:
6194 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 0);
6195 case SystemZ::ATOMIC_LOAD_XR:
6196 return emitAtomicLoadBinary(MI, MBB, SystemZ::XR, 32);
6197 case SystemZ::ATOMIC_LOAD_XILF:
6198 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF, 32);
6199 case SystemZ::ATOMIC_LOAD_XGR:
6200 return emitAtomicLoadBinary(MI, MBB, SystemZ::XGR, 64);
6201 case SystemZ::ATOMIC_LOAD_XILF64:
6202 return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF64, 64);
6203 case SystemZ::ATOMIC_LOAD_XIHF64:
6204 return emitAtomicLoadBinary(MI, MBB, SystemZ::XIHF64, 64);
6206 case SystemZ::ATOMIC_LOADW_NRi:
6207 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 0,
true);
6208 case SystemZ::ATOMIC_LOADW_NILHi:
6209 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 0,
true);
6210 case SystemZ::ATOMIC_LOAD_NRi:
6211 return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, 32,
true);
6212 case SystemZ::ATOMIC_LOAD_NILLi:
6213 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL, 32,
true);
6214 case SystemZ::ATOMIC_LOAD_NILHi:
6215 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, 32,
true);
6216 case SystemZ::ATOMIC_LOAD_NILFi:
6217 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF, 32,
true);
6218 case SystemZ::ATOMIC_LOAD_NGRi:
6219 return emitAtomicLoadBinary(MI, MBB, SystemZ::NGR, 64,
true);
6220 case SystemZ::ATOMIC_LOAD_NILL64i:
6221 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILL64, 64,
true);
6222 case SystemZ::ATOMIC_LOAD_NILH64i:
6223 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH64, 64,
true);
6224 case SystemZ::ATOMIC_LOAD_NIHL64i:
6225 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHL64, 64,
true);
6226 case SystemZ::ATOMIC_LOAD_NIHH64i:
6227 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHH64, 64,
true);
6228 case SystemZ::ATOMIC_LOAD_NILF64i:
6229 return emitAtomicLoadBinary(MI, MBB, SystemZ::NILF64, 64,
true);
6230 case SystemZ::ATOMIC_LOAD_NIHF64i:
6231 return emitAtomicLoadBinary(MI, MBB, SystemZ::NIHF64, 64,
true);
6234 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6236 case SystemZ::ATOMIC_LOAD_MIN_32:
6237 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6239 case SystemZ::ATOMIC_LOAD_MIN_64:
6240 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
6244 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6246 case SystemZ::ATOMIC_LOAD_MAX_32:
6247 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR,
6249 case SystemZ::ATOMIC_LOAD_MAX_64:
6250 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CGR,
6254 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6256 case SystemZ::ATOMIC_LOAD_UMIN_32:
6257 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6259 case SystemZ::ATOMIC_LOAD_UMIN_64:
6260 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
6264 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6266 case SystemZ::ATOMIC_LOAD_UMAX_32:
6267 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR,
6269 case SystemZ::ATOMIC_LOAD_UMAX_64:
6270 return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLGR,
6274 return emitAtomicCmpSwapW(MI, MBB);
6275 case SystemZ::MVCSequence:
6276 case SystemZ::MVCLoop:
6278 case SystemZ::NCSequence:
6279 case SystemZ::NCLoop:
6281 case SystemZ::OCSequence:
6282 case SystemZ::OCLoop:
6284 case SystemZ::XCSequence:
6285 case SystemZ::XCLoop:
6287 case SystemZ::CLCSequence:
6288 case SystemZ::CLCLoop:
6290 case SystemZ::CLSTLoop:
6291 return emitStringWrapper(MI, MBB, SystemZ::CLST);
6292 case SystemZ::MVSTLoop:
6293 return emitStringWrapper(MI, MBB, SystemZ::MVST);
6294 case SystemZ::SRSTLoop:
6295 return emitStringWrapper(MI, MBB, SystemZ::SRST);
6298 case SystemZ::TBEGIN_nofloat:
6300 case SystemZ::TBEGINC:
6301 return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC,
true);
6302 case SystemZ::LTEBRCompare_VecPseudo:
6303 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
6304 case SystemZ::LTDBRCompare_VecPseudo:
6305 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
6306 case SystemZ::LTXBRCompare_VecPseudo:
6307 return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
void setFrameAddressIsTaken(bool T)
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
const unsigned CCMASK_CMP_GT
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
constexpr bool isUInt< 32 >(uint64_t x)
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
static MVT getIntegerVT(unsigned BitWidth)
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
void push_back(const T &Elt)
unsigned Log2_32_Ceil(uint32_t Value)
Log2_32_Ceil - This function returns the ceil log base 2 of the specified value, 32 if the value is z...
BUILTIN_OP_END - This must be the last enum value in this list.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
A parsed version of the target data layout string in and methods for querying it. ...
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static SDValue getCCResult(SelectionDAG &DAG, SDNode *After)
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
static void VerifyVectorTypes(const SmallVectorImpl< ISD::InputArg > &Ins)
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
T findLastSet(T Val, ZeroBehavior ZB=ZB_Max)
Get the index of the last set bit starting from the least significant bit.
int getFramePointerSaveIndex() const
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
LLVMContext * getContext() const
unsigned getVarArgsFrameIndex() const
const unsigned GR32Regs[16]
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
uint64_t getZExtValue() const
Get zero extended value.
const int64_t CallFrameSize
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask)
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static const Permute PermuteForms[]
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
LocInfo getLocInfo() const
const MCPhysReg ArgFPRs[NumArgFPRs]
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool hasOneUse() const
Return true if there is exactly one use of this node.
const unsigned CCMASK_FCMP
const unsigned FP128Regs[16]
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
unsigned odd128(bool Is32bit)
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array...
SDVTList getVTList() const
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void addLiveIn(unsigned Reg, unsigned vreg=0)
addLiveIn - Add the specified register as a live-in.
static bool isImmHH(uint64_t Val)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
const TargetMachine & getTarget() const
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
static SDValue getVectorCmp(SelectionDAG &DAG, unsigned Opcode, const SDLoc &DL, EVT VT, SDValue CmpOp0, SDValue CmpOp1)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
This class represents a function call, abstracting a target machine's calling convention.
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
const GlobalValue * getGlobal() const
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
bool mayBeEmittedAsTailCall(CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
static SDValue tryBuildVectorReplicate(SelectionDAG &DAG, const SystemZInstrInfo *TII, const SDLoc &DL, EVT VT, uint64_t Value, unsigned BitsPerElement)
bool hasInterlockedAccess1() const
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
unsigned getNumOperands() const
Return the number of values used by this operation.
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
unsigned getNumOperands() const
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
constexpr bool isInt< 16 >(int64_t x)
int64_t getOffset() const
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
const SDValue & getOperand(unsigned Num) const
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
const unsigned FP32Regs[16]
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT TVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &ArgsFlags, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void reserve(size_type N)
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
const SDValue & getBasePtr() const
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned char TargetFlags=0)
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
static bool shouldSwapCmpOperands(const Comparison &C)
const unsigned CCMASK_ICMP
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
unsigned getResNo() const
get the index which selects a specific result in the SDNode
static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP)
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
bool isVector() const
isVector - Return true if this is a vector value type.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
bool hasLoadStoreOnCond() const
The address of a basic block.
A description of a memory reference used in the backend.
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
struct fuzzer::@269 Flags
int64_t getOffset() const
static MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const HexagonInstrInfo * TII
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register...
const unsigned CCMASK_TM_MSB_0
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
void setVarArgsFirstGPR(unsigned GPR)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
MachineFunction & getMachineFunction() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
Reg
All possible values of the reg field in the ModR/M byte.
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
int getMaskElt(unsigned Idx) const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_NODISCARD bool empty() const
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
static bool isScalarToVector(SDValue Op)
bool hasLoadStoreOnCond2() const
unsigned getLocReg() const
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDValue getRegisterMask(const uint32_t *RegMask)
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
const unsigned CCMASK_TBEGIN
static bool add(uint64_t *dest, const uint64_t *x, const uint64_t *y, unsigned len)
This function adds the integer array x to the integer array Y and places the result in dest...
SmallVector< ISD::OutputArg, 32 > Outs
const unsigned NumArgFPRs
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
void incNumLocalDynamicTLSAccesses()
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
MachineConstantPoolValue * getMachineCPVal() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
const unsigned CCMASK_ANY
EVT getMemoryVT() const
Return the type of the in-memory value.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low lanes of an integer vector.
const unsigned VectorBits
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
const unsigned CCMASK_CS_NE
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
const unsigned CCMASK_TM_SOME_0
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, const Value *PtrVal, unsigned Alignment, AtomicOrdering Ordering, SynchronizationScope SynchScope)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands...
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
libFuzzer flag descriptions (reconstructed): max_len — maximum length of the test input; if 0, libFuzzer tries to guess a good value based on the corpus and reports it. prefer_small — if 1, always prefer smaller inputs during the corpus shuffle. error_exitcode — when libFuzzer itself reports a bug, this exit code will be used. max_total_time — if positive, indicates the maximal total time in seconds to run the fuzzer. minimize_crash — if 1, minimizes the provided crash input; use with -runs=N or -max_total_time=N etc. use_value_profile — experimental; use value profile to guide fuzzing. workers — number of simultaneous worker processes to run the jobs; if 0, min(jobs, NumberOfCpuCores()/2) is used. reload — reload the main corpus periodically to pick up new units discovered by other processes.
const unsigned CCMASK_TM_ALL_1
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
UNDEF - An undefined node.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
const MachineBasicBlock * getParent() const
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
bool isFoldableMemAccessOffset(Instruction *I, int64_t Offset) const override
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
SDNode * getNode() const
get the SDNode which holds the desired result
static SDValue emitIntrinsicWithChainAndGlue(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getScalarSizeInBits() const
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
static void adjustForLTGFR(Comparison &C)
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset) const
constexpr bool isUInt< 8 >(uint64_t x)
unsigned const MachineRegisterInfo * MRI
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
bool isMachineConstantPoolEntry() const
CodeModel::Model getCodeModel() const
Returns the code model.
MVT - Machine Value Type.
static void getVPermMask(ShuffleVectorSDNode *VSN, SmallVectorImpl< int > &Bytes)
const unsigned CCMASK_VCMP
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
C - The default llvm calling convention, compatible with C.
bool isVectorTy() const
True if this is an instance of VectorType.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
This is an important base class in LLVM.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
bool isVector() const
isVector - Return true if this is a vector value type.
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map)
const Constant * getConstVal() const
const MachineOperand & getOperand(unsigned i) const
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
const unsigned CCMASK_CMP_LE
static SDValue buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SmallVectorImpl< SDValue > &Elems)
static ManagedStatic< OptionRegistry > OR
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
static mvt_range fp_valuetypes()
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op)
void setRegSaveFrameIndex(unsigned FI)
static const unsigned End
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setImm(int64_t immVal)
unsigned getOpcode() const
const unsigned CCMASK_TM_MSB_1
TRAP - Trapping instruction.
Value * getOperand(unsigned i) const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
const unsigned CCMASK_CMP_LT
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
static mvt_range vector_valuetypes()
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y)...
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
const unsigned CCMASK_TDC
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL)
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
static unsigned forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
bool isRxSBGMask(uint64_t Mask, unsigned BitSize, unsigned &Start, unsigned &End) const
unsigned getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
EVT - Extended Value Type.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL, CCValAssign &VA, SDValue Chain, SDValue Value)
This structure contains all information that is necessary for lowering calls.
static bool isImmLH(uint64_t Val)
T findFirstSet(T Val, ZeroBehavior ZB=ZB_Max)
Get the index of the first set bit starting from the least significant bit.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align, bool *Fast) const override
Determine if the target supports unaligned memory accesses.
This class contains a discriminated union of information about pointers in memory operands...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const BlockAddress * getBlockAddress() const
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
const SystemZInstrInfo * getInstrInfo() const override
const unsigned CCMASK_TM_MIXED_MSB_0
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail position.
void setIsKill(bool Val=true)
bool hasPopulationCount() const
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
const TargetFrameLowering * getFrameLowering() const override
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
constexpr bool isInt< 32 >(int64_t x)
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
void setVarArgsFrameIndex(unsigned FI)
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
const unsigned CCMASK_CMP_EQ
Type * getType() const
All values are typed, get the type of this value.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
static bool canTreatAsByteVector(EVT VT)
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
const unsigned FP64Regs[16]
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
BRCOND - Conditional branch.
Information about stack frame layout on the target.
const SDValue & getChain() const
static bool tryBuildVectorByteMask(BuildVectorSDNode *BVN, uint64_t &Mask)
static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL, CCValAssign &VA, SDValue Value)
Byte Swap and Counting operators.
const unsigned CCMASK_CMP_O
const unsigned GR128Regs[16]
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const unsigned CCMASK_CMP_NE
Represents one node in the SelectionDAG.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
const unsigned GR64Regs[16]
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
static mvt_range integer_valuetypes()
const unsigned CCMASK_TM_MIXED_MSB_1
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue Glue, unsigned CCValid, unsigned CCMask)
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, const SDLoc &DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
Class for arbitrary precision integers.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
const unsigned VectorBytes
iterator_range< use_iterator > uses()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
bool isIntegerTy() const
True if this is an instance of IntegerType.
unsigned EmulatedTLS
EmulatedTLS - This flag enables emulated TLS model, using emutls function in the runtime library...
void setVarArgsFirstFPR(unsigned FPR)
static use_iterator use_end()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
static void VerifyVectorType(MVT VT, EVT ArgVT)
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
ANY_EXTEND - Used for integer types. The high bits are undefined.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const unsigned CCMASK_CMP_GE
static MachineOperand earlyUseOperand(MachineOperand Op)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
BR_JT - Jumptable branch.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Extend, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
SmallVector< SDValue, 32 > OutVals
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
Bitwise operators - logical and, logical or, logical xor.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
static bool is32Bit(EVT VT)
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
bool hasOneUse() const
Return true if there is exactly one user of this value.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions with OptSize attribute.
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
SDValue prepareVolatileOrAtomicLoad(SDValue Chain, const SDLoc &DL, SelectionDAG &DAG) const override
This callback is used to prepare for a volatile or atomic load.
static MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
FSINCOS - Compute both fsin and fcos as a single operation.
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
constexpr bool isUInt< 16 >(uint64_t x)
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static unsigned reverseCCMask(unsigned CCMask)
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
unsigned getRegSaveFrameIndex() const
EVT getValueType() const
Return the ValueType of the referenced return value.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
unsigned getPointerSize() const
Get the pointer size for this target.
unsigned getReg() const
getReg - Returns the register number.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
user_iterator user_begin()
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
unsigned getAlignment() const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
unsigned even128(bool Is32bit)
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
Returns true if the target can instruction select the specified FP immediate natively.
unsigned getVarArgsFirstGPR() const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
SDValue getValueType(EVT)
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
PREFETCH - This corresponds to a prefetch intrinsic.
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
static bool isImmLL(uint64_t Val)
Primary interface to the complete machine description for the target machine.
A SystemZ-specific constant pool value.
static SDValue lowerVectorSETCC(SelectionDAG &DAG, const SDLoc &DL, EVT VT, ISD::CondCode CC, SDValue CmpOp0, SDValue CmpOp1)
StringRef - Represent a constant reference to a string, i.e.
SetCC operator - This evaluates to a true value iff the condition is true.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
static SDValue emitIntrinsicWithGlue(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
bool hasFPExtension() const
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of stores operations that may be substituted for the call to memset, used for functions with OptSize attribute.
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getLocMemOffset() const
static bool isVolatile(Instruction *Inst)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
static void adjustForFNeg(Comparison &C)
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
const unsigned CCMASK_TM_ALL_0
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
TRUNCATE - Completely drop the high bits.
const unsigned CCMASK_TEND
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
Fast - This calling convention attempts to make calls as fast as possible (e.g.
const unsigned VR128Regs[32]
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP, bool &Invert)
const SystemZRegisterInfo * getRegisterInfo() const override
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified undef VECTOR.
EVT changeVectorElementTypeToInteger() const
changeVectorElementTypeToInteger - Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
const unsigned CCMASK_TM_SOME_1
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
unsigned getVarArgsFirstFPR() const
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
const unsigned CCMASK_CMP_UO
uint64_t getZExtValue() const
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
static bool isImmHL(uint64_t Val)