#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"

    cl::desc(
        "Verify that narrow int args are properly extended per the "

      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
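// Note (added): this constructor appears to belong to the Comparison helper
// used throughout this file. Op0/Op1 are the values being compared, Chain is
// the optional strict-FP chain, and Opcode/ICmpType/CCValid/CCMask describe
// how the comparison maps onto the SystemZ condition-code register. This is a
// reading of the surrounding fragments, not a comment from the source.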
  if (Subtarget.hasHighWord())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVector())
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE; ++I) {
  if (Subtarget.hasPopulationCount())
  if (!Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (!Subtarget.hasVectorEnhancements3()) {
  if (Subtarget.hasVectorEnhancements3()) {
                     {MVT::i8, MVT::i16, MVT::i32}, Legal);
                     {MVT::i8, MVT::i16}, Legal);
  if (!Subtarget.hasFPExtension()) {
  if (Subtarget.hasMiscellaneousExtensions4()) {
  if (Subtarget.hasMiscellaneousExtensions3()) {

    if (VT != MVT::v2i64 || Subtarget.hasVectorEnhancements3())
    if (Subtarget.hasVectorEnhancements3() &&
        VT != MVT::v16i8 && VT != MVT::v8i16) {
    if (Subtarget.hasVectorEnhancements1())

  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements2()) {
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1()) {
  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {

  if (!Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVectorEnhancements1()) {
  if (!Subtarget.hasVector()) {
  struct RTLibCallMapping {

  static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"

  for (auto &E : RTLibCallCommon)

  return Subtarget.hasSoftFloat();
  return Subtarget.hasVectorEnhancements1();

  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))

  if (SplatBitSize > 64)

  if (isInt<16>(SignedValue)) {

  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {

  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);

  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
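// Note (added): presumably part of the vector-constant legality check. Undef
// splat bits falling in the leading (Upper) or trailing (Lower) runs may be
// chosen freely, so the remaining Middle undef bits are ORed into the splat
// value before retrying a mask-style (VGM-like) match. Summarizes the
// fragments above; not original source text.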
  unsigned HalfSize = Width / 2;

  if (HighValue != LowValue || 8 > HalfSize)

  SplatBits = HighValue;

  SplatBitSize = Width;

  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,

  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                                       bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
  const int64_t FPOffset = 0;

  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);

      .addReg(SpecialRegs->getFramePointerRegister())

      .addReg(SpecialRegs->getStackPointerRegister())

  Register BCReg = MRI.createVirtualRegister(PtrRC);

      .addReg(SpecialRegs->getStackPointerRegister())
      .addImm(TFL->getBackchainOffset(*MF))

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::EH_SjLj_Setup))

  MI.eraseFromParent();
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  const int64_t FPOffset = 0;

                     SpecialRegs->getFramePointerRegister())

                     SpecialRegs->getStackPointerRegister())

      .addReg(SpecialRegs->getStackPointerRegister())
      .addImm(TFL->getBackchainOffset(*MF))

  MI.eraseFromParent();
  if (Subtarget.hasInterlockedAccess1() &&

  return isInt<32>(Imm) || isUInt<32>(Imm);

  return isUInt<32>(Imm) || isUInt<32>(-Imm);
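// Note (added): these two returns line up with isLegalICmpImmediate and
// isLegalAddImmediate as usually defined for SystemZ: compares can use
// CGFI/CLGFI (any signed or unsigned 32-bit immediate), while adds can use
// ALGFI/SLGFI (an unsigned 32-bit value, added or subtracted). The enclosing
// function signatures are elided in this fragment.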
      LongDisplacement(LongDispl), IndexReg(IdxReg) {}

    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;

    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;

    if (IsFPAccess || IsVectorAccess)

  return AM.Scale == 0;
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())

                                                  SrcAS, FuncAttributes);
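// Note (added): the checks above keep small memory operations as MVC/XC
// sequences rather than expanding them into scalar loads and stores. The
// memset case compares size - 1 because the first byte is typically stored
// separately (STC/MVI) and the remainder propagated with MVC. A hedged
// reading of the fragment, not original commentary.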
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;

  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())

  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();

  return FromBits > ToBits;

  return FromBits > ToBits;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {
                    const char *constraint) const {

  Value *CallOperandVal = info.CallOperandVal;

  if (!CallOperandVal)

  switch (*constraint) {

    if (Subtarget.hasVector())

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
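// Note (added): the five constant checks above correspond to the documented
// SystemZ inline-asm immediate constraints: 'I' (unsigned 8-bit), 'J'
// (unsigned 12-bit), 'K' (signed 16-bit), 'L' (signed 20-bit displacement),
// and 'M' (the constant 0x7fffffff). The case labels themselves are missing
// from this fragment.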
static std::pair<unsigned, const TargetRegisterClass *>
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

      return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

      if (Subtarget.hasVector()) {
        return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        return std::make_pair(0U, &SystemZ::VR64BitRegClass);
      return std::make_pair(0U, &SystemZ::VR128BitRegClass);

  auto getVTSizeInBits = [&VT]() {

  if (Constraint[1] == 'r') {
    if (getVTSizeInBits() == 32)
    if (getVTSizeInBits() == 128)

  if (Constraint[1] == 'f') {
      return std::make_pair(
    if (getVTSizeInBits() == 32)
    if (getVTSizeInBits() == 128)

  if (Constraint[1] == 'v') {
    if (!Subtarget.hasVector())
      return std::make_pair(
    if (getVTSizeInBits() == 32)
    if (getVTSizeInBits() == 64)

                       : SystemZ::NoRegister)
        Subtarget.isTargetELF() ? SystemZ::R15D : SystemZ::NoRegister)
        .Default(SystemZ::NoRegister);
                                      const Constant *PersonalityFn) const {

                                      const Constant *PersonalityFn) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                         Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                         Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
              C->getSExtValue(), SDLoc(Op), Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
              C->getSExtValue(), SDLoc(Op), Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                         Op.getValueType()));
#include "SystemZGenCallingConv.inc"

  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,

                                                 Type *ToType) const {

  if (BitCastToType == MVT::v2i64)

                     MVT::Untyped, Hi, Lo);
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 &&
      PartVT == MVT::Untyped) {

    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 &&
      PartVT == MVT::Untyped) {
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;

      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,

      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;

    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();

    int64_t RegSaveOffset =

          &SystemZ::FP64BitRegClass);

  MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)

    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
                        unsigned Offset, bool LoadAdr = false) {

  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);

  unsigned ADADelta = 0;
  unsigned EPADelta = 8;

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {

    verifyNarrowIntegerArgs_Call(Outs, &MF.getFunction(), Callee);
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    unsigned ArgIndex = Outs[I].OrigArgIndex;

    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {

      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;

        SlotVT = Outs[I].VT;

      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();

      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;

                SlotVT.getStoreSize()) &&
               "Not enough space for argument part!");

      ArgValue = SpillSlot;

  if (!StackPtr.getNode())

    RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));

  if (!MemOpChains.empty())

               ->getAddressOfCalleeRegister();

    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));

  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);

                            VA.getLocVT(), Glue);

                                bool DoesNotReturn,
                                bool IsReturnValueUsed) const {

  Args.reserve(Ops.size());

    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsZExt = !Entry.IsSExt;
    Args.push_back(Entry);

  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)

  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);

    verifyNarrowIntegerArgs_Ret(Outs, &MF.getFunction());

  if (RetLocs.empty())

  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                               unsigned &CCValid) {
  unsigned Id = Op.getConstantOperandVal(1);

  case Intrinsic::s390_tbegin:
  case Intrinsic::s390_tbegin_nofloat:
  case Intrinsic::s390_tend:

  unsigned Id = Op.getConstantOperandVal(0);

  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
  case Intrinsic::s390_vceqqs:

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
  case Intrinsic::s390_vchqs:

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
  case Intrinsic::s390_vchlqs:

  case Intrinsic::s390_vtm:

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:

  case Intrinsic::s390_tdc:

  for (unsigned I = 2; I < NumOps; ++I)

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

  return Intr.getNode();

  for (unsigned I = 1; I < NumOps; ++I)

  return Intr.getNode();
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
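// Note (added): this macro expands one ISD condition triple per comparison.
// The plain and ordered (SETO*) forms map directly to the comparison mask,
// while the unordered (SETU*) form additionally sets CCMASK_CMP_UO, since
// SystemZ reports an unordered FP compare as CC 3.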
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  int64_t Value = ConstOp1->getSExtValue();

  if (!C.Op0.hasOneUse() ||

  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())

  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  uint64_t Mask = (1 << NumBits) - 1;

  int64_t SignedValue = ConstOp1->getSExtValue();

  } else if (NumBits == 8) {

  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlign(),
                           Load->getMemOperand()->getFlags());

  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())

  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());

  if (Load->getMemoryVT() == MVT::i8)

  switch (Load->getExtensionType()) {
  if (C.Op0.getValueType() == MVT::i128)
  if (C.Op0.getValueType() == MVT::f128)

  if (isa<ConstantFPSDNode>(C.Op1))

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)

      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))

  unsigned Opcode0 = C.Op0.getOpcode();

      C.Op0.getConstantOperandVal(1) == 0xffffffff)

      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {

  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {

  if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);

      cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {

      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();

        C.Op0 = C.Op0.getOperand(0);

  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
                                  unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {

  if (CmpVal == Mask) {

  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {

  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
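// Note (added): these range checks appear to implement test-under-mask (TM)
// condition selection. For a compare of (Val & Mask) against CmpVal, values
// near 0, near Mask, or straddling the mask's low/high single bits can be
// encoded as TM condition-code masks when the comparison is effectively
// unsigned. A summarizing note, not a comment from the source.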
  if (C.Op0.getValueType() == MVT::i128) {
    auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
    if (Mask && Mask->getAPIntValue() == 0) {

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);

  uint64_t CmpVal = ConstOp1->getZExtValue();

    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    MaskVal = Mask->getZExtValue();

  if (NewC.Op0.getValueType() != MVT::i64 ||

    MaskVal = -(CmpVal & -CmpVal);

  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                        MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
      NewC.Op0.getOpcode() == ISD::SRL &&
      (MaskVal << ShiftVal != 0) &&
      ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                        MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;

  if (Mask && Mask->getZExtValue() == MaskVal)

  C.CCMask = NewCCMask;
  if (C.Op0.getValueType() != MVT::i128)

  bool Swap = false, Invert = false;

    C.CCMask ^= C.CCValid;

  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask || Mask->getValueSizeInBits(0) > 64)

  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())

  C.Op0 = C.Op0.getOperand(0);

  C.CCValid = CCValid;

    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;

  C.CCMask &= CCValid;
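// Note (added): the expressions above translate a condition-code value into
// a 4-bit CC mask with CC 0 in bit 3 down to CC 3 in bit 0, so "CC == n"
// becomes 1 << (3 - n), inequalities become prefix masks, and everything is
// finally clipped to CCValid. Explanatory note, not original text.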
                      bool IsSignaling = false) {

  unsigned Opcode, CCValid;

  Comparison C(CmpOp0, CmpOp1, Chain);

  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)

    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;

  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {

    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);

  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
                               unsigned CCValid, unsigned CCMask) {

    case CmpMode::Int:         return 0;
    case CmpMode::FP:          return 0;
    case CmpMode::StrictFP:    return 0;
    case CmpMode::SignalingFP: return 0;

  int Mask[] = { Start, -1, Start + 1, -1 };

      !Subtarget.hasVectorEnhancements1()) {

    SDValue Ops[2] = { Res, NewChain };

    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
                                           bool IsSignaling) const {
  assert(!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;

    assert(IsFP && "Unexpected integer comparison");
                         DL, VT, CmpOp1, CmpOp0, Chain);
                         DL, VT, CmpOp0, CmpOp1, Chain);
                        LT.getValue(1), GE.getValue(1));

    assert(IsFP && "Unexpected integer comparison");
                         DL, VT, CmpOp1, CmpOp0, Chain);
                         DL, VT, CmpOp0, CmpOp1, Chain);
                        LT.getValue(1), GT.getValue(1));

    Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
    Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);

    Chain = Cmp.getValue(1);

  if (Chain && Chain.getNode() != Cmp.getNode()) {

  EVT VT = Op.getValueType();

    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

                                           bool IsSignaling) const {

  EVT VT = Op.getNode()->getValueType(0);

    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);
    C.CCMask ^= C.CCValid;

      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {

  if (Subtarget.hasVectorEnhancements3() &&
      C.Op0.getValueType() == MVT::i128 &&

  SDValue Ops[] = {TrueOp, FalseOp,

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);

                     Node->getValueType(0),

  assert(Mask && "Missing call preserved mask for calling convention");

  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,

    SDValue TP = lowerThreadPointer(DL, DAG);

  if (CP->isMachineConstantPoolEntry())

  unsigned Depth = Op.getConstantOperandVal(0);

    int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);

  unsigned Depth = Op.getConstantOperandVal(0);

    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);

    int Offset = TFL->getReturnAddressOffset(MF);

                                 &SystemZ::GR64BitRegClass);
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
                       LoadN->getBasePtr(), LoadN->getMemOperand());

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    if (Subtarget.hasHighWord()) {
                                MVT::i64, SDValue(U64, 0), In);
                       DL, MVT::f32, Out64);

  if (InVT == MVT::f32 && ResVT == MVT::i32) {
                              MVT::f64, SDValue(U64, 0), In);
    if (Subtarget.hasHighWord())

    return lowerVASTART_XPLINK(Op, DAG);
  return lowerVASTART_ELF(Op, DAG);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const unsigned NumFields = 4;

  for (unsigned I = 0; I < NumFields; ++I) {

    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,

  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

                       Align(8), false, false,

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,

    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,

  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  if (ExtraAlignSpace)

  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();

  if (ExtraAlignSpace) {

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,

  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

    Backchain = DAG.getLoad(MVT::i64, DL, Chain,
                            getBackchainAddress(OldSP, DAG),

  if (ExtraAlignSpace)

      DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);

  if (RequiredAlign > StackAlign) {

    Chain = DAG.getStore(Chain, DL, Backchain,
                         getBackchainAddress(NewSP, DAG),
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(

                                              unsigned Opcode) const {
  EVT VT = Op.getValueType();

  assert(Subtarget.hasMiscellaneousExtensions2());

                     Op.getOperand(0), Op.getOperand(1), Even, Odd);

  EVT VT = Op.getValueType();

                Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
                Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

                     LL, RL, Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

                Op.getOperand(1), Ops[1], Ops[0]);
                Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

  EVT VT = Op.getValueType();

                Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)

  if (!isInt<16>(Value))

                     MVT::i64, HighOp, Low32);
  if (N->getValueType(0) == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)

  MVT VT = N->getSimpleValueType(0);

  if (VT == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)

  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  if (NumSignificantBits == 0)

  BitSize = std::min(BitSize, OrigBitSize);

  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {

  if (BitSize != OrigBitSize)
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  assert(
      (Node->getMemoryVT() == MVT::i128 || Node->getMemoryVT() == MVT::f128) &&
      "Only custom lowering i128 or f128.");

  EVT PtrVT = Addr.getValueType();
  EVT WideVT = MVT::i32;

                                                    unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)

  if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {

  SDValue AlignedAddr, BitShift, NegBitShift;

  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,

  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {

    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    assert(Subtarget.hasInterlockedAccess1() &&
           "Should have been expanded by AtomicExpand pass.");

                       Node->getChain(), Node->getBasePtr(), NegSrc2,
                       Node->getMemOperand());

  auto *Node = cast<AtomicSDNode>(Op.getNode());

  if (Node->getMemoryVT() == MVT::i128) {

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {

    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
                                   DL, Tys, Ops, NarrowVT, MMO);

  SDValue AlignedAddr, BitShift, NegBitShift;

  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                                   VTList, Ops, NarrowVT, MMO);
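// Note (added): the operand lists above suggest the usual subword atomic
// strategy: i8/i16 atomics are widened to the containing aligned 32-bit
// word, with BitShift/NegBitShift letting the ATOMIC_LOADW_* and
// ATOMIC_CMP_SWAPW pseudos rotate the subword into place, operate, and
// rotate back. Inferred from the fragments; surrounding code is elided.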
SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {

  if (auto *SI = dyn_cast<StoreInst>(&I))
  if (auto *LI = dyn_cast<LoadInst>(&I))
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))

         "in GHC calling convention");
                            Regs->getStackPointerRegister(), Op.getValueType());
5138 "in GHC calling convention");
5145 if (StoreBackchain) {
5147 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
5148 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
5152 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
5155 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
5163 bool IsData =
Op.getConstantOperandVal(4);
5166 return Op.getOperand(0);
5169 bool IsWrite =
Op.getConstantOperandVal(2);
5171 auto *
Node = cast<MemIntrinsicSDNode>(
Op.getNode());
5175 Node->getVTList(), Ops,
5176 Node->getMemoryVT(),
Node->getMemOperand());
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,

  unsigned Opcode, CCValid;

    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,

  unsigned Opcode, CCValid;

    if (Op->getNumValues() == 1)
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");

  unsigned Id = Op.getConstantOperandVal(0);

  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuphg:

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
  case Intrinsic::s390_vuplhg:

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vuplg:

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
  case Intrinsic::s390_vupllg:

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::s390_vaq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vaccb:
  case Intrinsic::s390_vacch:
  case Intrinsic::s390_vaccf:
  case Intrinsic::s390_vaccg:
  case Intrinsic::s390_vaccq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vacq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vacccq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vsq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vscbib:
  case Intrinsic::s390_vscbih:
  case Intrinsic::s390_vscbif:
  case Intrinsic::s390_vscbig:
  case Intrinsic::s390_vscbiq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vsbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vsbcbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
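// Note (added): each row above describes one fixed byte-permute pattern.
// Bytes 0-15 index the first source vector and 16-31 the second, and each
// row lists which source byte lands in each of the 16 result bytes; the
// rows appear to cover the merge-high/low, pack, and similar forms that a
// single instruction can implement. The opcode fields of the table are
// elided in this fragment.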
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];

                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };

      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;

                         unsigned &OpNo0, unsigned &OpNo1) {

    int Elt = Bytes[From];
      Transform[From] = -1;
      while (P.Bytes[To] != Elt) {
      Transform[From] = To;

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;

      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
                           unsigned BytesPerElement, int &Base) {

  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
      } else if (unsigned(Base) != Elem - I)

                              unsigned &StartIndex, unsigned &OpNo0,
  int OpNos[] = { -1, -1 };

  for (unsigned I = 0; I < 16; ++I) {
    int Index = Bytes[I];
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;
    N = N->getOperand(0);
    if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
      return Op->getZExtValue() == 0;

  for (unsigned I = 0; I < Num; I++)

  for (unsigned I = 0; I < 2; ++I)

  unsigned StartIndex, OpNo0, OpNo1;

  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
      if (OpNo == ZeroVecIdx && I == 0) {
      if (OpNo != ZeroVecIdx && Byte == 0) {
      if (ZeroIdx != -1) {
        if (Bytes[I] >= 0) {
          if (OpNo == ZeroVecIdx)

      SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];

                     (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}

  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }

  unsigned UnpackFromEltSize;

void GeneralShuffle::addUndef() {
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {

  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;

  if (FromBytesPerElement < BytesPerElement)
             (FromBytesPerElement - BytesPerElement));

  while (Op.getNode()) {
      Op = Op.getOperand(0);
    } else if (Op.isUndef()) {

  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
  if (OpNo == Ops.size())

  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);
  if (Ops.size() == 0)

  tryPrepareForUnpack();

  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };

        else if (OpNo == I + Stride)

        if (NewBytes[J] >= 0) {
                 "Invalid double permute");
          assert(NewBytesMap[J] < 0 && "Invalid double permute");
        if (NewBytes[J] >= 0)

    Ops[1] = Ops[Stride];

  unsigned OpNo0, OpNo1;
  if (unpackWasPrepared() && Ops[1].isUndef())
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))

  Op = insertUnpackIfPrepared(DAG, DL, Op);

  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";
void GeneralShuffle::tryPrepareForUnpack() {

  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)

  if (Ops.size() > 2 &&

  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;

      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (Bytes[Elt] != -1) {
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;

    if (Ops.size() == 2) {
        if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
          UnpackFromEltSize = UINT_MAX;

  if (UnpackFromEltSize > 4)

  LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
                    << UnpackFromEltSize << ". Zero vector is Op#"
                    << ZeroVecOpNo
             dumpBytes(Bytes, "Original Bytes vector:"););

    Elt += UnpackFromEltSize;
    for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
      Bytes[B] = Bytes[Elt];

  Ops.erase(&Ops[ZeroVecOpNo]);

    if (Bytes[I] >= 0) {
      if (OpNo > ZeroVecOpNo)

  if (!unpackWasPrepared())

  unsigned InBits = UnpackFromEltSize * 8;
  unsigned OutBits = InBits * 2;
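// Note (added): unpackWasPrepared() implies UnpackFromEltSize is 1, 2, or 4,
// so the final unpack doubles each element width (InBits -> OutBits) and
// supplies the zero bytes that the erased zero-vector operand previously
// provided. A summarizing note, not original text.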
    if (!Op.getOperand(I).isUndef())

  if (Value.isUndef())

  GeneralShuffle GS(VT);

  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
      Op = Op.getOperand(0);
      unsigned Elem = Op.getConstantOperandVal(1);
      if (!GS.add(Op.getOperand(0), Elem))
    } else if (Op.isUndef()) {

  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {

  return GS.getNode(DAG, SDLoc(BVN));
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
  if (auto *AL = dyn_cast<AtomicSDNode>(Op))

  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      else if (Elem != Single) {

  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))

  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {

  if (VT == MVT::v2i64 && !AllLoads)

  if (VT == MVT::v2f64 && !AllLoads)

  if (VT == MVT::v4f32 && !AllLoads) {

                     DL, MVT::v2i64, Op01, Op23);
  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {

  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)

  std::map<const SDNode*, unsigned> UseCounts;
  SDNode *LoadMaxUses = nullptr;
  for (unsigned I = 0; I < NumElements; ++I)
    if (isVectorElementLoad(Elems[I])) {
      SDNode *Ld = Elems[I].getNode();
      if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
  if (LoadMaxUses != nullptr) {
    ReplicatedVal = SDValue(LoadMaxUses, 0);

  unsigned I1 = NumElements / 2 - 1;
  unsigned I2 = NumElements - 1;
  bool Def1 = !Elems[I1].isUndef();
  bool Def2 = !Elems[I2].isUndef();

  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)

  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());

  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {

  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);
  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());

  EVT VT = Op.getValueType();

  if (VSN->isSplat()) {
    unsigned Index = VSN->getSplatIndex();
           "Splat index should be defined and in first operand");

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))

  return GS.getNode(DAG, SDLoc(VSN));
  EVT VT = Op.getValueType();

  if (VT == MVT::v2f64 &&

SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,

  EVT VT = Op.getValueType();

  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {

SDValue SystemZTargetLowering::

  EVT OutVT = Op.getValueType();

  } while (FromBits != ToBits);

SDValue SystemZTargetLowering::

  EVT OutVT = Op.getValueType();

  unsigned NumInPerOut = InNumElts / OutNumElts;

  unsigned ZeroVecElt = InNumElts;
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;
    Mask[MaskElt] = PackedElt;
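// Note (added): this loop builds a shuffle mask expressing an in-register
// zero extension. Each group of NumInPerOut result elements takes
// NumInPerOut - 1 elements from the zero vector (indices >= InNumElts,
// i.e. the second shuffle operand) followed by the packed source element.
// Explanatory note based on the loop above.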
                                          unsigned ByScalar) const {

  EVT VT = Op.getValueType();

  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;

    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {

      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      unsigned Index = VSN->getSplatIndex();
             "Splat index should be defined and in first operand");

      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
  MVT DstVT = Op.getSimpleValueType();

  unsigned SrcAS = N->getSrcAddressSpace();

  assert(SrcAS != N->getDestAddressSpace() &&
         "addrspacecast must be between different address spaces");

  } else if (DstVT == MVT::i32) {

  MVT ResultVT = Op.getSimpleValueType();

  unsigned Check = Op.getConstantOperandVal(1);

  unsigned TDCMask = 0;

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);
  switch (Op.getOpcode()) {
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerBR_CC(Op, DAG);
    return lowerSELECT_CC(Op, DAG);
    return lowerSETCC(Op, DAG);
    return lowerSTRICT_FSETCC(Op, DAG, false);
    return lowerSTRICT_FSETCC(Op, DAG, true);
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
    return lowerBITCAST(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerVACOPY(Op, DAG);
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
    return lowerSMUL_LOHI(Op, DAG);
    return lowerUMUL_LOHI(Op, DAG);
    return lowerSDIVREM(Op, DAG);
    return lowerUDIVREM(Op, DAG);
    return lowerXALUO(Op, DAG);
    return lowerUADDSUBO_CARRY(Op, DAG);
    return lowerOR(Op, DAG);
    return lowerCTPOP(Op, DAG);
    return lowerVECREDUCE_ADD(Op, DAG);
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerATOMIC_LDST_I128(Op, DAG);
    return lowerATOMIC_LOAD_SUB(Op, DAG);
    return lowerATOMIC_CMP_SWAP(Op, DAG);
    return lowerSTACKSAVE(Op, DAG);
    return lowerSTACKRESTORE(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerIS_FPCLASS(Op, DAG);
    return lowerGET_ROUNDING(Op, DAG);
    return lowerREADCYCLECOUNTER(Op, DAG);
                                   &SystemZ::FP128BitRegClass);

      SystemZ::REG_SEQUENCE, SL, MVT::f128,

                                   &SystemZ::FP128BitRegClass);

  switch (N->getOpcode()) {

    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
                        DL, Tys, Ops, MVT::i128, MMO);

    if (N->getValueType(0) == MVT::f128)

    SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2) };
                        DL, Tys, Ops, MVT::i128, MMO);

    if (cast<AtomicSDNode>(N)->getSuccessOrdering() ==
                      MVT::Other, Res), 0);

    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                        DL, Tys, Ops, MVT::i128, MMO);

  if (N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())

                                          DAGCombinerInfo &DCI,

  unsigned Opcode = Op.getOpcode();

    Op = Op.getOperand(0);

        canTreatAsByteVector(Op.getValueType())) {

                          BytesPerElement, First))

      if (Byte % BytesPerElement != 0)

      Index = Byte / BytesPerElement;

               canTreatAsByteVector(Op.getValueType())) {

      EVT OpVT = Op.getValueType();

      if (OpBytesPerElement < BytesPerElement)

      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)

      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {

        DCI.AddToWorklist(Op.getNode());

      DCI.AddToWorklist(Op.getNode());

               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {

      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();

      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)

      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;

      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)

      Op = Op.getOperand(0);

  if (Op.getValueType() != VecVT) {
    DCI.AddToWorklist(Op.getNode());

SDValue SystemZTargetLowering::combineTruncateExtract(
  if (canTreatAsByteVector(VecVT)) {
    if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {

      if (BytesPerElement % TruncBytes == 0) {

        unsigned Scale = BytesPerElement / TruncBytes;
        unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;

        EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
        return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
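// Note (added): element numbering here is big-endian, so truncating an
// extracted element is the same as extracting its least-significant
// subelement, which is the *last* one in the scaled range; hence
// NewIndex = (Index + 1) * Scale - 1.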
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);

    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {

      DCI.CombineTo(N0.getNode(), TruncSelect);

SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);

    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));

      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {

  unsigned Opcode = N->getOpcode();

    if (Op1 == N->getOperand(0))

    if (ElemBytes <= 4) {

      DCI.AddToWorklist(Op1.getNode());

      DCI.AddToWorklist(Op.getNode());

  LoPart = HiPart = nullptr;

    if (Use.getResNo() != 0)

    bool IsLoPart = true;

  LoPart = HiPart = nullptr;

    if (Use.getResNo() != 0)

        User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)

    switch (User->getConstantOperandVal(1)) {
    case SystemZ::subreg_l64:
    case SystemZ::subreg_h64:
SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT LdVT = N->getValueType(0);

  if (auto *LN = dyn_cast<LoadSDNode>(N)) {
    MVT LoadNodeVT = LN->getBasePtr().getSimpleValueType();
    if (PtrVT != LoadNodeVT) {
      return DAG.getExtLoad(LN->getExtensionType(), DL, LN->getValueType(0),
                            LN->getChain(), AddrSpaceCast, LN->getMemoryVT(),
                            LN->getMemOperand());

                   LD->getPointerInfo(), LD->getOriginalAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());

    DCI.CombineTo(HiPart, EltLoad, true);

                   LD->getPointerInfo().getWithOffset(8),
                   LD->getOriginalAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());

    DCI.CombineTo(LoPart, EltLoad, true);

    DCI.AddToWorklist(Chain.getNode());

    } else if (Use.getResNo() == 0)

  if (!Replicate || OtherUses.empty())

  for (SDNode *U : OtherUses) {
bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 ||
        VT == MVT::i128)

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0)
      continue;
    if ((unsigned) M[i] != NumElts - 1 - i)
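// Note (added): this loop recognizes a full element reversal. A shuffle
// qualifies only if every defined mask entry satisfies M[i] == NumElts - 1 - i
// (undef entries are skipped), which is the pattern a byte-swapping vector
// load/store can absorb.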
  for (auto *U : StoredVal->users()) {
    EVT CurrMemVT = ST->getMemoryVT().getScalarType();
    } else if (isa<BuildVectorSDNode>(U)) {
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {

  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();

  MVT StoreNodeVT = SN->getBasePtr().getSimpleValueType();
  if (PtrVT != StoreNodeVT) {
    return DAG.getStore(SN->getChain(), DL, SN->getValue(), AddrSpaceCast,
                        SN->getPointerInfo(), SN->getOriginalAlign(),
                        SN->getMemOperand()->getFlags(), SN->getAAInfo());

  if (MemVT.isInteger() && SN->isTruncatingStore()) {
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());

                       SN->getBasePtr(), SN->getMemoryVT(),
                       SN->getMemOperand());

  if (!SN->isTruncatingStore() &&

    SDValue Ops[] = { N->getOperand(0), BSwapOp, N->getOperand(2) };

                     Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      Subtarget.hasVectorEnhancements2()) {

                     Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      N->getOperand(0).reachesChainWithoutSideEffects(
          SDValue(Op1.getNode(), 1))) {

                     Ops, MemVT, SN->getMemOperand());

        DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(),
                     SN->getPointerInfo(), SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());

                     SN->getPointerInfo().getWithOffset(8),
                     SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());
    if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() ||

      APInt Val = C->getAPIntValue();

      assert(SN->isTruncatingStore() &&
             "Non-truncating store and immediate value does not fit?");
      Val = Val.trunc(TotBytes * 8);

      if (VCI.isVectorConstantLegal(Subtarget) &&

  auto FindReplicatedReg = [&](SDValue MulOp) {
    EVT MulVT = MulOp.getValueType();
    if (MulOp->getOpcode() == ISD::MUL &&
        (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {

        WordVT = LHS->getOperand(0).getValueType();
        WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT();

      if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
            APInt(MulVT.getSizeInBits(), C->getZExtValue()));
        if (VCI.isVectorConstantLegal(Subtarget) &&
            WordVT == VCI.VecVT.getScalarType())

  if (isa<BuildVectorSDNode>(Op1) &&

    if (auto *C = dyn_cast<ConstantSDNode>(SplatVal))
      FindReplicatedReg(SplatVal);

  if (auto *C = dyn_cast<ConstantSDNode>(Op1))
    FindReplicatedReg(Op1);

           "Bad type handling");

                       SN->getBasePtr(), SN->getMemOperand());
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {

      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {

                       Ops, LD->getMemoryVT(), LD->getMemOperand());

    DCI.CombineTo(N, ESLoad);

    DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())

      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

    EVT VecVT = Op.getValueType();

                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());

    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());

  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {

    if (canTreatAsByteVector(VecVT))
      return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                            IndexN->getZExtValue(), DCI, false);

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (N->getOperand(0) == N->getOperand(1))

  if (Chain1 == Chain2)
7765SDValue SystemZTargetLowering::combineFP_ROUND(
7766 SDNode *
N, DAGCombinerInfo &DCI)
const {
7768 if (!Subtarget.hasVector())
7777 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7780 if (
N->getValueType(0) == MVT::f32 && Op0.
hasOneUse() &&
7786 for (
auto *U : Vec->
users()) {
7787 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7789 U->getOperand(0) == Vec &&
7791 U->getConstantOperandVal(1) == 1) {
7793 if (OtherRound.
getOpcode() ==
N->getOpcode() &&
7797 if (
N->isStrictFPOpcode()) {
7802 {MVT::v4f32, MVT::Other}, {Chain, Vec});
7807 DCI.AddToWorklist(VRound.
getNode());
7811 DCI.AddToWorklist(Extract1.
getNode());
7820 N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())
    // ...
  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  // ...
  if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() &&
      // ...
    for (auto *U : Vec->users()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          // ...
          U->getOperand(0) == Vec &&
          // ...
          U->getConstantOperandVal(1) == 2) {
        // ...
        if (OtherExtend.getOpcode() == N->getOpcode() &&
            // ...
          if (N->isStrictFPOpcode()) {
            // ...
                                  {MVT::v2f64, MVT::Other}, {Chain, Vec});
            // ...
          DCI.AddToWorklist(VExtend.getNode());
          // ...
          DCI.AddToWorklist(Extract1.getNode());
          // ...
                             N->getVTList(), Extract0, Chain);
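// Scalar model of the vector combine above (illustrative only; lane indices
// 0 and 2 match the getConstantOperandVal(1) == 2 check): two scalar extends
// of lanes 0 and 2 of a v4f32 equal the two lanes of one full-vector extend
// to v2f64, so the DAG can share a single vector operation.
#include <cassert>

int main() {
  float V[4] = {1.5f, 2.5f, 3.5f, 4.5f};
  double Wide[2] = {double(V[0]), double(V[2])}; // one vector op, two lanes
  assert(Wide[0] == double(V[0]) && Wide[1] == double(V[2]));
  return 0;
}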
SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  // ...
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();
  // ...
  if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits &&
      OutScalarBits <= 64) {
    unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
    // ...
    unsigned ExtOpcode =
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    // ...
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
      // ...
                                  Ops, LD->getMemoryVT(), LD->getMemOperand());
    // ...
    if (N->getValueType(0) == MVT::i16)
      // ...
    DCI.CombineTo(N, ResVal);
    // ...
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
  // ...
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);
  // ...
      (canLoadStoreByteSwapped(N->getValueType(0)) &&
       // ...
    EVT VecVT = N->getValueType(0);
    EVT EltVT = N->getValueType(0).getVectorElementType();
    // ...
    DCI.AddToWorklist(Vec.getNode());
    // ...
    DCI.AddToWorklist(Elt.getNode());
    // ...
    DCI.AddToWorklist(Vec.getNode());
    // ...
    DCI.AddToWorklist(Elt.getNode());
  // ...
  if (SV && Op.hasOneUse()) {
    // ...
    EVT VecVT = N->getValueType(0);
    // ...
    DCI.AddToWorklist(Op0.getNode());
    // ...
    DCI.AddToWorklist(Op1.getNode());
    // ...
    DCI.AddToWorklist(Op0.getNode());
    // ...
    DCI.AddToWorklist(Op1.getNode());
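// Byte-swap model of the loads the combine above replaces (standard C++,
// assuming fixed-width two's-complement integers): a byte-reversing load in
// the LRV/LRVH style behaves like a plain load followed by bswap, which is
// why (bswap (load)) can fold into one instruction.
#include <cstdint>

static uint16_t bswap16(uint16_t V) { return uint16_t((V << 8) | (V >> 8)); }

static uint32_t bswap32(uint32_t V) {
  return (V >> 24) | ((V >> 8) & 0xFF00u) | ((V << 8) & 0xFF0000u) | (V << 24);
}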
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  // ...
  bool Invert = false;
  // ...
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    // ...
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    // ...
    if (CompareRHS->getAPIntValue() == FalseVal->getAPIntValue())
      // ...
    else if (CompareRHS->getAPIntValue() != TrueVal->getAPIntValue())
      // ...
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
      // ...
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
    // ...
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
      // ...
    auto *SHL = CompareLHS->getOperand(0).getNode();
    // ...
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    // ...
    auto *IPM = SHL->getOperand(0).getNode();
    // ...
    if (!CompareLHS->hasOneUse())
      // ...
    if (CompareRHS->getZExtValue() != 0)
      // ...
    CCReg = IPM->getOperand(0);
SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    // ...
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  // ...
                       N->getOperand(3), CCReg);
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    // ...
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  // ...
                       N->getOperand(0), N->getOperand(1),
SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    // ...
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  // ...
  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    // ...
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();
  // ...
  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    // ...
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
    SelectCCMaskVal ^= SelectCCValidVal;
  // ...
  if (SelectCCValidVal & ~CCValidVal)
    // ...
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    // ...
  return Select->getOperand(4);
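// Condition-code mask sketch (assumed SystemZ convention: CC value n is
// tested via bit (3 - n) of a 4-bit mask). XOR-ing a mask with the valid
// bits inverts the condition, which is what the
// `SelectCCMaskVal ^= SelectCCValidVal` step above relies on.
#include <cassert>

static bool taken(unsigned CC, unsigned CCMask) {
  return (CCMask >> (3 - CC)) & 1u;
}

int main() {
  unsigned CCValid = 0xE; // hypothetical: only CC values 0..2 can occur
  unsigned CCMask = 0x8;  // condition taken exactly when CC == 0
  unsigned Inverted = CCMask ^ CCValid;
  for (unsigned CC = 0; CC <= 2; ++CC)
    assert(taken(CC, CCMask) != taken(CC, Inverted)); // exact complement
  return 0;
}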
SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
  EVT VT = N->getValueType(0);
SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
  unsigned Id = N->getConstantOperandVal(1);
  // ...
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
        // ...
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
        // ...
  return N->getOperand(0);
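// Sketch of the load-with-length semantics assumed by the combine above: a
// VLL-style load reads min(HighestIdx, 15) + 1 bytes, so a constant index
// >= 15 already covers the whole 16-byte vector and the intrinsic can be
// replaced by a plain full-width load (and likewise for the stores).
#include <algorithm>
#include <cstdint>
#include <cstring>

static void vllLike(uint8_t Dst[16], const uint8_t *Src, unsigned HighestIdx) {
  unsigned N = std::min(HighestIdx, 15u) + 1; // bytes actually loaded
  std::memset(Dst, 0, 16);                    // remaining bytes are zero
  std::memcpy(Dst, Src, N);
}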
  switch (N->getOpcode()) {
  // ...
  case ISD::UREM:
    return combineIntDIVREM(N, DCI);
  EVT VT = Op.getValueType();
  // ...
  unsigned Opcode = Op.getOpcode();
  // ...
  unsigned Id = Op.getConstantOperandVal(0);
  // ...
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    // ...
    SrcDemE = DemandedElts;
    // ...
    SrcDemE = SrcDemE.trunc(NumElts / 2);
    // ...
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    SrcDemE = APInt(NumElts * 2, 0);
    // ...
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    SrcDemE = APInt(NumElts * 2, 0);
    // ...
  case Intrinsic::s390_vpdi: {
    // ...
    SrcDemE = APInt(NumElts, 0);
    if (!DemandedElts[OpNo - 1])
      // ...
    unsigned Mask = Op.getConstantOperandVal(3);
    unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
    // ...
    SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
    // ...
  }
  case Intrinsic::s390_vsldb: {
    // ...
    assert(VT == MVT::v16i8 && "Unexpected type.");
    unsigned FirstIdx = Op.getConstantOperandVal(3);
    assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
    unsigned NumSrc0Els = 16 - FirstIdx;
    SrcDemE = APInt(NumElts, 0);
    // ...
      APInt DemEls = DemandedElts.trunc(NumSrc0Els);
      // ...
      APInt DemEls = DemandedElts.lshr(NumSrc0Els);
      // ...
  }
  case Intrinsic::s390_vperm:
    // ...
    SrcDemE = APInt(1, 1);
    // ...
    SrcDemE = DemandedElts;
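// Demanded-bytes sketch for the VSLDB case above (assumed semantics: the
// result is bytes FirstIdx..15 of operand 0 followed by bytes 0..FirstIdx-1
// of operand 1). Bit I of each mask stands for byte I being demanded.
#include <cstdint>

static uint32_t demandedFromOp0(uint32_t DemandedResult, unsigned FirstIdx) {
  unsigned NumSrc0Els = 16 - FirstIdx; // result bytes that come from op 0
  uint32_t Dem = DemandedResult & ((1u << NumSrc0Els) - 1u);
  return Dem << FirstIdx; // result byte I maps to op-0 byte I + FirstIdx
}

static uint32_t demandedFromOp1(uint32_t DemandedResult, unsigned FirstIdx) {
  unsigned NumSrc0Els = 16 - FirstIdx;
  return DemandedResult >> NumSrc0Els; // rest maps to op-1 bytes 0..FirstIdx-1
}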
                                   const APInt &DemandedElts,
  // ...

                                   const APInt &DemandedElts,
                                   unsigned Depth) const {
  // ...
  unsigned tmp0, tmp1;
  // ...
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    // ...
         "KnownBits does not match VT in bitwidth");
  // ...
         "DemandedElts does not match VT number of elements");
  // ...
  unsigned Opcode = Op.getOpcode();
  // ...
  bool IsLogical = false;
  unsigned Id = Op.getConstantOperandVal(0);
  // ...
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
    // ...
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    // ...
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
    // ...

  if (LHS == 1)
    return 1;
  // ...
  if (RHS == 1)
    return 1;
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  // ...
  if (SrcBitWidth > VTBits) {
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    // ...
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
                                                   unsigned Depth) const {
  if (Op.getResNo() != 0)
    // ...
  unsigned Opcode = Op.getOpcode();
  // ...
  unsigned Id = Op.getConstantOperandVal(0);
  // ...
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
    // ...
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
    // ...
    EVT VT = Op.getValueType();
  switch (Op->getOpcode()) {
  // ...
         "Unexpected stack alignment");
  // ...
  unsigned StackProbeSize =
      // ...
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
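// Rounding sketch for the probe-size computation above: with a power-of-two
// StackAlign, `Size & ~(StackAlign - 1)` clears the low bits and rounds Size
// down to a multiple of StackAlign; a result of 0 falls back to StackAlign.
#include <cassert>

int main() {
  unsigned StackAlign = 8;
  assert((4096u & ~(StackAlign - 1)) == 4096u); // already aligned
  assert((4100u & ~(StackAlign - 1)) == 4096u); // rounded down
  unsigned StackProbeSize = 5u & ~(StackAlign - 1); // rounds down to 0
  assert((StackProbeSize ? StackProbeSize : StackAlign) == StackAlign);
  return 0;
}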
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  // ...
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  // ...
    if (Succ->isLiveIn(SystemZ::CC))
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
  for (auto *MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();
    // ...
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
      // ...
    if (auto It = RegRewriteTable.find(TrueReg); It != RegRewriteTable.end())
      TrueReg = It->second.first;
    // ...
    if (auto It = RegRewriteTable.find(FalseReg); It != RegRewriteTable.end())
      FalseReg = It->second.second;
    // ...
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
        // ...
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
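// Plain C++ sketch of the RegRewriteTable idea above (integers stand in for
// virtual registers; values are hypothetical): when one Select result feeds
// another Select lowered into the same CFG diamond, the PHI operand must be
// substituted with the value that register has on that incoming path.
#include <map>
#include <utility>

int main() {
  std::map<int, std::pair<int, int>> RegRewriteTable; // Dest -> (True, False)
  RegRewriteTable[10] = {1, 2};   // %10 = select %1, %2
  int TrueReg = 10, FalseReg = 3; // second select uses %10 on its true side
  if (auto It = RegRewriteTable.find(TrueReg); It != RegRewriteTable.end())
    TrueReg = It->second.first;   // on the true path, %10 is really %1
  return (TrueReg == 1 && FalseReg == 3) ? 0 : 1;
}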
  assert(TFL->hasReservedCallFrame(MF) &&
         "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
  // ...
  uint32_t NumBytes = MI.getOperand(0).getImm();
  // ...
  MI.eraseFromParent();
  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
  // ...
    assert(NextMI.getOperand(3).getImm() == CCValid &&
           "Bad CCValid operands since CC was not redefined.");
    if (NextMI.getOperand(4).getImm() == CCMask ||
        NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
      // ...
    if (NextMI.definesRegister(SystemZ::CC, nullptr) ||
        NextMI.usesCustomInsertionHook())
      // ...
    for (auto *SelMI : Selects)
      if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
        // ...
    if (NextMI.isDebugInstr()) {
      // ...
      assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
      // ...
    } else if (User || ++Count > 20)
      // ...
  bool CCKilled = (LastMI->killsRegister(SystemZ::CC, nullptr) ||
      // ...
  for (auto *SelMI : Selects)
    SelMI->eraseFromParent();
  // ...
  for (auto *DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);
                                                unsigned StoreOpcode,
                                                unsigned STOCOpcode,
                                                bool Invert) const {
  // ...
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  // ...
  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
  // ...
  for (auto *I : MI.memoperands())
    // ...
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    // ...
    MI.eraseFromParent();
  if (!MI.killsRegister(SystemZ::CC, nullptr) &&
      // ...
  MI.eraseFromParent();
  // ...
  int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
  // ...
  Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
  // ...
  MI.eraseFromParent();
                                                bool Invert) const {
  // ...
  int64_t Disp = MI.getOperand(2).getImm();
  // ...
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  // ...
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  // ...
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  // ...
    Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
    // ...
  } else if (BinOpcode)
    // ...
  MI.eraseFromParent();
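// Conceptual sketch of the compare-and-swap loop the expansion above builds
// out of machine basic blocks (assumed shape, shown here with std::atomic):
// load the old value, apply the binary op, and retry the CS until no other
// CPU has modified the word in between.
#include <atomic>

static int atomicAddViaCAS(std::atomic<int> &Mem, int Operand) {
  int OldVal = Mem.load();
  int NewVal;
  do {
    NewVal = OldVal + Operand;                           // the "BinOpcode" step
  } while (!Mem.compare_exchange_weak(OldVal, NewVal));  // the CS retry loop
  return OldVal; // atomic RMW returns the previous value
}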
                                                unsigned KeepOldMask) const {
  // ...
  int64_t Disp = MI.getOperand(2).getImm();
  // ...
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  // ...
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  // ...
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  // ...
  MI.eraseFromParent();
  int64_t Disp = MI.getOperand(2).getImm();
  // ...
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  // ...
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");
  // ...
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  // ...
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);
  // ...
  if (!MI.registerDefIsDead(SystemZ::CC, nullptr))
    // ...
  MI.eraseFromParent();
      .add(MI.getOperand(1))
      .addImm(SystemZ::subreg_h64)
      .add(MI.getOperand(2))
      .addImm(SystemZ::subreg_l64);
  MI.eraseFromParent();
                                          bool ClearEven) const {
  // ...
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  // ...
    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
    // ...
  MI.eraseFromParent();
                                                 unsigned Opcode,
                                                 bool IsMemset) const {
  // ...
  uint64_t DestDisp = MI.getOperand(1).getImm();
  // ...
    if (!isUInt<12>(Disp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
      // ...
    SrcDisp = MI.getOperand(3).getImm();
    // ...
    SrcDisp = DestDisp++;
    foldDisplIfNeeded(DestBase, DestDisp);
  // ...
  bool IsImmForm = LengthMO.isImm();
  bool IsRegForm = !IsImmForm;
  // ...
                          unsigned Length) -> void {
    // ...
  bool NeedsLoop = false;
  // ...
  Register LenAdjReg = SystemZ::NoRegister;
  // ...
    ImmLength = LengthMO.getImm();
    ImmLength += IsMemset ? 2 : 1;
    if (ImmLength == 0) {
      MI.eraseFromParent();
      // ...
    if (Opcode == SystemZ::CLC) {
      if (ImmLength > 3 * 256)
        // ...
    } else if (ImmLength > 6 * 256)
      // ...
    LenAdjReg = LengthMO.getReg();
  // ...
      (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
  // ...
        MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
    // ...
      TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
  // ...
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    // ...
  if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
    DestBase = loadZeroAddress();
  if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
    SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
  // ...
        (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));
    // ...
    Register ThisSrcReg = MRI.createVirtualRegister(RC);
    // ...
        (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
    Register NextSrcReg = MRI.createVirtualRegister(RC);
    // ...
        (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);
    // ...
      MBB = MemsetOneCheckMBB;
      // ...
    if (EndMBB && !ImmLength)
      // ...
    if (!HaveSingleBase)
      // ...
    if (Opcode == SystemZ::MVC)
      // ...
    if (!HaveSingleBase)
      // ...
    Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    Register RemDestReg =
        HaveSingleBase ? RemSrcReg
                       : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    // ...
    if (!HaveSingleBase)
      // ...
    if (Opcode != SystemZ::MVC) {
      // ...
  while (ImmLength > 0) {
    // ...
    foldDisplIfNeeded(DestBase, DestDisp);
    foldDisplIfNeeded(SrcBase, SrcDisp);
    insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
  // ...
  if (EndMBB && ImmLength > 0) {
    // ...
  MI.eraseFromParent();
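// Chunking sketch (assumed parameters; MVC operand syntax approximated): an
// MVC moves at most 256 bytes per instruction, so an immediate-length copy
// becomes a run of full 256-byte ops plus one remainder op, mirroring the
// `while (ImmLength > 0)` loop above.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t DestDisp = 0, SrcDisp = 0, ImmLength = 700;
  while (ImmLength > 0) {
    uint64_t ThisLength = std::min<uint64_t>(ImmLength, 256);
    std::printf("MVC %llu(%llu,dest),%llu(src)\n",
                (unsigned long long)DestDisp, (unsigned long long)ThisLength,
                (unsigned long long)SrcDisp);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength; // emits 256 + 256 + 188 for a 700-byte copy
  }
  return 0;
}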
  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();
  // ...
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  // ...
  MI.eraseFromParent();
                                                       bool NoFloat) const {
  // ...
  MI.setDesc(TII->get(Opcode));
  // ...
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
      0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
      0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100};
  Control |= GPRControlBit[15];
  // ...
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);
  // ...
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      // ...
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      // ...
  MI.eraseFromParent();
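// Sketch of the GPRControlBit table above: adjacent general registers share
// one control bit because the transaction-begin save mask covers even/odd
// register pairs, so clobber handling is decided per pair.
#include <cassert>

int main() {
  static const unsigned GPRControlBit[16] = {
      0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
      0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100};
  for (int I = 0; I < 16; I += 2)
    assert(GPRControlBit[I] == GPRControlBit[I + 1]); // pairs share a bit
  return 0;
}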
  Register SizeReg = MI.getOperand(2).getReg();
  // ...
  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  // ...
  MI.eraseFromParent();
SDValue SystemZTargetLowering::
// ...

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    return emitAdjCallStack(MI, MBB);

  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::SCmp128Hi:
    return emitICmp128Hi(MI, MBB, false);
  case SystemZ::UCmp128Hi:
    return emitICmp128Hi(MI, MBB, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    // ...
  case SystemZ::ATOMIC_LOADW_MAX:
    // ...
  case SystemZ::ATOMIC_LOADW_UMIN:
    // ...
  case SystemZ::ATOMIC_LOADW_UMAX:
    // ...

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCImm:
  case SystemZ::MVCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCImm:
  case SystemZ::XCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCImm:
  case SystemZ::CLCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::MemsetImmImm:
  case SystemZ::MemsetImmReg:
  case SystemZ::MemsetRegImm:
  case SystemZ::MemsetRegReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);

  case SystemZ::EH_SjLj_SetJmp:
    // ...
  case SystemZ::EH_SjLj_LongJmp:
    // ...

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    // ...
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  // ...

      DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
  // ...

  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  EVT OpVT = Op.getValueType();
  // ...
  assert(OpVT.isVector() && "Operand type for VECREDUCE_ADD is not a vector.");
bool SystemZTargetLowering::isFullyInternal(const Function *Fn) const {
  // ...
    if (auto *CB = dyn_cast<CallBase>(U)) {
      if (CB->getCalledFunction() != Fn)
        // ...

  if (Attrs.hasRetAttrs())
    // ...
  OS << *F->getReturnType() << " @" << F->getName() << "(";
  for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) {
    // ...
    OS << *FT->getParamType(I);
    // ...
    for (auto A : {Attribute::SExt, Attribute::ZExt, Attribute::NoExt})
      // ...

void SystemZTargetLowering::
// ...
  bool IsInternal = false;
  const Function *CalleeFn = nullptr;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    if ((CalleeFn = dyn_cast<Function>(G->getGlobal())))
      IsInternal = isFullyInternal(CalleeFn);
  if (!verifyNarrowIntegerArgs(Outs, IsInternal)) {
    errs() << "ERROR: Missing extension attribute of passed "
           << "value in call to function:\n" << "Callee: ";
    if (CalleeFn != nullptr)
      // ...
    errs() << "Caller: ";
    // ...

void SystemZTargetLowering::
// ...
  if (!verifyNarrowIntegerArgs(Outs, isFullyInternal(F))) {
    errs() << "ERROR: Missing extension attribute of returned "
           << "value from function:\n";
    // ...

bool SystemZTargetLowering::
// ...
                               bool IsInternal) const {
  // ...
  for (unsigned i = 0; i < Outs.size(); ++i) {
    MVT VT = Outs[i].VT;
    // ...
           "Unexpected integer argument VT.");
    if (VT == MVT::i32 &&
unsigned const MachineRegisterInfo * MRI
static msgpack::DocNode getNode(msgpack::DocNode DN, msgpack::Type Type, MCValue Val)
AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Function Alias Analysis Results
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static bool isUndef(const MachineInstr &MI)
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
static constexpr Register SPReg
static bool isSelectPseudo(MachineInstr &MI)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue getI128Select(SelectionDAG &DAG, const SDLoc &DL, Comparison C, SDValue TrueOp, SDValue FalseOp)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void printFunctionArgExts(const Function *F, raw_fd_ostream &OS)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static cl::opt< bool > EnableIntArgExtCheck("argext-abi-check", cl::init(false), cl::desc("Verify that narrow int args are properly extended per the " "SystemZ ABI."))
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue lowerAddrSpaceCast(SDValue Op, SelectionDAG &DAG)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
APInt zext(unsigned width) const
Zero extend to a new width.
static APInt getSignMask(unsigned BitWidth)
Get the SignMask for a specific bit width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
StringRef getValueAsString() const
Return the attribute's value as a string.
static StringRef getNameFromAttrKind(Attribute::AttrKind AttrKind)
LLVM Basic Block Representation.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasLocalLinkage() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setReturnAddressIsTaken(bool s)
MachineFunctionProperties & reset(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
iterator_range< user_iterator > users()
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
const TargetSubtargetInfo & getSubtarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), BatchAAResults *BatchAA=nullptr)
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
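A sketch, assuming two v16i8 operands V1 and V2: select the even-indexed bytes of their 32-byte concatenation (mask indices run over both operands).

    SmallVector<int, 16> Mask;
    for (int I = 0; I < 16; ++I)
      Mask.push_back(2 * I);
    SDValue Pack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, Mask);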
This SDNode is used to implement the code generator support for the LLVM IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
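Note the inverted convention: the call returns true on failure. A minimal sketch:

    StringRef S = "1234";
    unsigned Value;
    if (!S.getAsInteger(/*Radix=*/10, Value))
      ; // parsed successfully, Value == 1234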
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
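A sketch of typical use (the register names and numbers below are illustrative, not the backend's actual table):

    unsigned Num = StringSwitch<unsigned>(Name)
                       .Case("r14", 14)
                       .Case("r15", 15)
                       .Default(0);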
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing special use registers particular to calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers, particular to z/OS in 64-bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
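A sketch of typical use inside a TargetLowering subclass constructor, mirroring the subtarget checks used elsewhere in this file:

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    if (Subtarget.hasPopulationCount())
      setOperationAction(ISD::CTPOP, MVT::i32, Legal);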
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
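These calls work together with computeRegisterProperties above: register classes are added first, then derived properties are computed once. A sketch in the SystemZ style:

    addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
    addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
    computeRegisterProperties(Subtarget.getRegisterInfo());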
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
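A sketch combining this with setTruncStoreAction above: a target without native f32/f64 memory conversions would expand both directions.

    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);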
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
A raw_ostream that writes to a file descriptor.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same as [SU]ADDO, but for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns the current rounding mode: -1 Undefined, 0 Round to 0, 1 Round to nearest (ties to even), 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isConstantSplatVectorAllZeros(const SDNode *N, bool BuildVectorOnly=false)
Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where all of the elements are 0 o...
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
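A small worked example of both helpers:

    ISD::CondCode CC  = ISD::SETLT;                          // X <  Y
    ISD::CondCode Inv = ISD::getSetCCInverse(CC, MVT::i64);  // SETGE
    ISD::CondCode Swp = ISD::getSetCCSwappedOperands(CC);    // SETGT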
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
ID ArrayRef< Type * > Tys
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
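A hedged sketch of a typical custom-inserter use (MBB, an insertion iterator MI, a DebugLoc DL, the SystemZInstrInfo *TII, and the two registers are all assumed to be in scope):

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LGR), DstReg)
        .addReg(SrcReg); // 64-bit GPR-to-GPR move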
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert 'Bytes' to a hex string and output to 'OS'.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
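Both counters live in llvm/ADT/bit.h; a quick example:

    unsigned TZ = llvm::countr_zero<uint64_t>(0x100); // 8
    unsigned LZ = llvm::countl_zero<uint64_t>(0x100); // 55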
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
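A quick example (llvm/Support/MathExtras.h), using both the template and runtime-width forms:

    int64_t A = llvm::SignExtend64<16>(0xFFFF); // -1
    int64_t B = llvm::SignExtend64(0x8000, 16); // -32768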
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
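A sketch tying the KnownBits helpers above together: an 8-bit value whose top nibble is known zero.

    KnownBits K(8);
    K.Zero.setHighBits(4);        // top four bits known to be 0
    APInt Max = K.getMaxValue();  // 0x0F
    KnownBits W = K.zext(16);     // extension keeps the zeros known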
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
const uint32_t * getNoPreservedMask() const override
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})