26#include "llvm/IR/IntrinsicsS390.h"
35#define DEBUG_TYPE "systemz-lower"
41 cl::desc(
"Verify that narrow int args are properly extended per the "
48 : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
49 Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
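// Commentary (added; inferred from how these fields are used by getCmp() and
// emitCmp() later in this file): a Comparison bundles the two operands and
// optional chain of a compare together with the CC-producing opcode, the
// integer-comparison type, and the CCValid/CCMask pair describing which of
// the four SystemZ condition codes make the comparison "true".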
  if (Subtarget.hasHighWord())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVector())

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;

  if (Subtarget.hasPopulationCount())
  if (!Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
                     {MVT::i8, MVT::i16, MVT::i32}, Legal);
                     {MVT::i8, MVT::i16}, Legal);
  if (!Subtarget.hasFPExtension()) {
  if (Subtarget.hasMiscellaneousExtensions3()) {
  if (VT != MVT::v2i64)
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements2()) {

  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;

  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1()) {

  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {

  if (!Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVectorEnhancements1()) {
  if (!Subtarget.hasVector()) {
    struct RTLibCallMapping {
    static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
    for (auto &E : RTLibCallCommon)
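// The two preprocessor lines above are the usual X-macro pattern: the .def
// file is expected to contain one HANDLE_LIBCALL(code, name) invocation per
// entry, which the #define expands into array initializers. A minimal sketch
// with hypothetical entries (not the real contents of ZOSLibcallNames.def):
//
//   HANDLE_LIBCALL(MEMCPY, "@@MEMCPY")  // expands to {RTLIB::MEMCPY, "@@MEMCPY"},
//   HANDLE_LIBCALL(MEMSET, "@@MEMSET")  // expands to {RTLIB::MEMSET, "@@MEMSET"},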
  return Subtarget.hasSoftFloat();

  return Subtarget.hasVectorEnhancements1();

  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))

  if (SplatBitSize > 64)

  if (isInt<16>(SignedValue)) {

  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {

  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);

  unsigned HalfSize = Width / 2;
  if (HighValue != LowValue || 8 > HalfSize)
  SplatBits = HighValue;
  SplatBitSize = Width;

  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
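// Commentary (hedged, based on BuildVectorSDNode::isConstantSplat's contract):
// the two calls above probe the build_vector at two granularities -- once
// requiring a single 128-bit splat element and once allowing elements as
// narrow as 8 bits. On success they fill in the splat value, the undef mask,
// and the detected splat element size, which the surrounding code then tries
// to materialize with VREPI/VGM/VGBM-style immediate instructions.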
                                       bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())

  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  const int64_t FPOffset = 0;

  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);

      .addReg(SpecialRegs->getFramePointerRegister())
      .addReg(SpecialRegs->getStackPointerRegister())

    Register BCReg = MRI.createVirtualRegister(PtrRC);
        .addReg(SpecialRegs->getStackPointerRegister())
        .addImm(TFL->getBackchainOffset(*MF))

  MIB = BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::EH_SjLj_Setup))

  MI.eraseFromParent();
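// Commentary on the setjmp side of the builtin SjLj pair (hedged; only the
// slots visible above are described): the code saves the frame pointer at
// FPOffset 0, materializes the resume label into LabelReg, stores the stack
// pointer, and, when a backchain is in use, the slot read via
// TFL->getBackchainOffset(*MF). EH_SjLj_Setup then records the dispatch
// block; emitEHSjLjLongJmp() below reloads the same slots in reverse.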
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  const int64_t FPOffset = 0;

                          SpecialRegs->getFramePointerRegister())
                          SpecialRegs->getStackPointerRegister())
        .addReg(SpecialRegs->getStackPointerRegister())
        .addImm(TFL->getBackchainOffset(*MF))

  MI.eraseFromParent();

  if (Subtarget.hasInterlockedAccess1() &&

  return isInt<32>(Imm) || isUInt<32>(Imm);

  return isUInt<32>(Imm) || isUInt<32>(-Imm);
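// Worked example for the two predicates above: 0xFFFFFFFF is a legal compare
// immediate (it fails isInt<32> but passes isUInt<32>), while an add
// immediate is accepted when either Imm or -Imm fits an unsigned 32-bit
// field, i.e. when the addition can be done as a 32-bit add or subtract.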
      : LongDisplacement(LongDispl), IndexReg(IdxReg) {}

    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;

    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;

    if (IsFPAccess || IsVectorAccess)

  return AM.Scale == 0;
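// Commentary: SystemZ addresses are base + displacement with an optional
// index register; there is no scaled-index form, hence the AM.Scale == 0
// requirement just above. The user-scanning logic before it prefers a plain
// base+displacement mode when a load feeds exactly one compare or store in
// the same block, since such loads can usually be folded into a
// memory-operand instruction instead of needing an indexed address.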
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())

                                                  SrcAS, FuncAttributes);
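// Worked example for the MVCFastLen threshold above (interpretation of the
// checks, not additional behavior): a memcpy of at most 16 bytes is left
// unexpanded so it can become a single MVC; memset compares Op.size() - 1
// because the MVC idiom stores the first byte and then propagates it across
// the remaining size - 1 bytes.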
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;

  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
  return FromBits > ToBits;

  return FromBits > ToBits;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {

                                            const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (Subtarget.hasVector())
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)

static std::pair<unsigned, const TargetRegisterClass *>
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end() - 1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
      return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      if (Subtarget.hasVector()) {
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);

  auto getVTSizeInBits = [&VT]() {

    if (Constraint[1] == 'r') {
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'f') {
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 64)

                           : SystemZ::NoRegister)
            Subtarget.isTargetELF() ? SystemZ::R15D : SystemZ::NoRegister)
        .Default(SystemZ::NoRegister);

                                          const Constant *PersonalityFn) const {

                                          const Constant *PersonalityFn) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
              C->getSExtValue(), SDLoc(Op), Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
              C->getSExtValue(), SDLoc(Op), Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                                              Op.getValueType()));
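// Summary of the constant constraints handled above (matching the checks in
// getSingleConstraintMatchWeight() earlier in this file; the elided case
// labels are assumed to be the SystemZ constraint letters): 'I' takes an
// unsigned 8-bit value, 'J' an unsigned 12-bit value, 'K' a signed 16-bit
// value, 'L' a signed 20-bit value, and 'M' only the literal 0x7fffffff.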
1713#include "SystemZGenCallingConv.inc"
1717 static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1723 Type *ToType)
const {
1786 if (BitCastToType == MVT::v2i64)
1813 MVT::Untyped,
Hi,
Lo);
1837 unsigned NumParts,
MVT PartVT, std::optional<CallingConv::ID>
CC)
const {
1839 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1850 MVT PartVT,
EVT ValueVT, std::optional<CallingConv::ID>
CC)
const {
1851 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;

      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,

      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;

    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();

    int64_t RegSaveOffset =

                                     &SystemZ::FP64BitRegClass);

    MRI.addLiveIn(Regs->getADARegister(), ADAvReg);

  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())

                        unsigned Offset, bool LoadAdr = false) {

  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);

  unsigned ADADelta = 0;
  unsigned EPADelta = 8;

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    verifyNarrowIntegerArgs_Call(Outs, &MF.getFunction(), Callee);

  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
      unsigned ArgIndex = Outs[I].OrigArgIndex;
      if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
        SlotVT = Outs[I].VT;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
                SlotVT.getStoreSize()) && "Not enough space for argument part!");
      ArgValue = SpillSlot;

      if (!StackPtr.getNode())

      RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));

  if (!MemOpChains.empty())

                   ->getAddressOfCalleeRegister();
    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));

  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
                                    VA.getLocVT(), Glue);

                          bool DoesNotReturn, bool IsReturnValueUsed) const {
  Args.reserve(Ops.size());
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsZExt = !Entry.IsSExt;
    Args.push_back(Entry);

  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)

  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);

    verifyNarrowIntegerArgs_Ret(Outs, &MF.getFunction());

  if (RetLocs.empty())

  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                                 unsigned &CCValid) {
  unsigned Id = Op.getConstantOperandVal(1);
  case Intrinsic::s390_tbegin:
  case Intrinsic::s390_tbegin_nofloat:
  case Intrinsic::s390_tend:

  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:

  case Intrinsic::s390_vtm:

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:

  case Intrinsic::s390_tdc:
  for (unsigned I = 2; I < NumOps; ++I)

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

  return Intr.getNode();

  for (unsigned I = 1; I < NumOps; ++I)

  return Intr.getNode();

  case ISD::SET##X:  return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
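// Commentary: this macro folds three related setcc codes into one entry.
// For a comparison X (EQ, LT, GT, ...), SETX and the ordered form SETOX map
// to the same CCMASK_CMP_X, while the unordered form SETUX additionally ORs
// in CCMASK_CMP_UO so that an unordered FP result also satisfies the test.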
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  int64_t Value = ConstOp1->getSExtValue();

  if (!C.Op0.hasOneUse() ||

  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())

  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  uint64_t Mask = (1 << NumBits) - 1;

  int64_t SignedValue = ConstOp1->getSExtValue();

  } else if (NumBits == 8) {

  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlign(),
                           Load->getMemOperand()->getFlags());

  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())

  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load->getMemoryVT() == MVT::i8)
  switch (Load->getExtensionType()) {

  if (C.Op0.getValueType() == MVT::i128)
  if (C.Op0.getValueType() == MVT::f128)

  if (isa<ConstantFPSDNode>(C.Op1))

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)

      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))

  unsigned Opcode0 = C.Op0.getOpcode();
      C.Op0.getConstantOperandVal(1) == 0xffffffff)

        ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
         (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {

  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {

  if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
          cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {

      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
      C.Op0 = C.Op0.getOperand(0);

  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
                                  unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {
  if (CmpVal == Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
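// Worked example for the range checks above (illustrative numbers): TEST
// UNDER MASK can only report "all masked bits 0", "mixed", or "all masked
// bits 1", so a compare is representable only at natural boundaries of the
// mask. With Mask = 0xff00, Low = 0x0100 (lowest mask bit) and High = 0x8000
// (highest); since Low is the smallest nonzero value of (x & Mask), an
// unsigned "(x & Mask) < CmpVal" test with 0 < CmpVal <= Low is exactly the
// all-masked-bits-zero condition.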
  if (C.Op0.getValueType() == MVT::i128) {
    auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
    if (Mask && Mask->getAPIntValue() == 0) {

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);

  uint64_t CmpVal = ConstOp1->getZExtValue();

    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    MaskVal = Mask->getZExtValue();

  if (NewC.Op0.getValueType() != MVT::i64 ||

    MaskVal = -(CmpVal & -CmpVal);

  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                                        MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
             NewC.Op0.getOpcode() == ISD::SRL &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                                        MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;

  if (Mask && Mask->getZExtValue() == MaskVal)

  C.CCMask = NewCCMask;

  if (C.Op0.getValueType() != MVT::i128)

  bool Swap = false, Invert = false;

    C.CCMask ^= C.CCValid;

  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask || Mask->getValueSizeInBits(0) > 64)
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
  C.Op0 = C.Op0.getOperand(0);

  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
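// Worked example for the CC-mask arithmetic above: a CC mask is a 4-bit set
// in which bit (3 - n) stands for condition code n. 1 << (3 - CC) therefore
// selects exactly CC (the "equal" case) and its complement is "not equal";
// ~0U << (4 - CC), masked to the valid bits, sets the bits for codes
// 0 .. CC-1, e.g. CC = 2 gives 0b1100, meaning "machine CC is 0 or 1".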
                         bool IsSignaling = false) {

  unsigned Opcode, CCValid;

  Comparison C(CmpOp0, CmpOp1, Chain);

  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)

    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;

  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {

    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);

  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);

                             unsigned CCValid, unsigned CCMask) {

    case CmpMode::Int:         return 0;
    case CmpMode::FP:          return 0;
    case CmpMode::StrictFP:    return 0;
    case CmpMode::SignalingFP: return 0;

  int Mask[] = { Start, -1, Start + 1, -1 };

      !Subtarget.hasVectorEnhancements1()) {

  SDValue Ops[2] = { Res, NewChain };

    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);

                                          bool IsSignaling) const {
  assert(!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;

      assert(IsFP && "Unexpected integer comparison");
                                DL, VT, CmpOp1, CmpOp0, Chain);
                                DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GE.getValue(1));

      assert(IsFP && "Unexpected integer comparison");
                                DL, VT, CmpOp1, CmpOp0, Chain);
                                DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GT.getValue(1));

      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);

    Chain = Cmp.getValue(1);

  if (Chain && Chain.getNode() != Cmp.getNode()) {

  EVT VT = Op.getValueType();
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

                                          bool IsSignaling) const {
  EVT VT = Op.getNode()->getValueType(0);
  SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                 Chain, IsSignaling);
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {

  SDValue Ops[] = {TrueOp, FalseOp,

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);

                          Node->getValueType(0),

  assert(Mask && "Missing call preserved mask for calling convention");

  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,

  SDValue TP = lowerThreadPointer(DL, DAG);

  if (CP->isMachineConstantPoolEntry())

  unsigned Depth = Op.getConstantOperandVal(0);
    int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);

  unsigned Depth = Op.getConstantOperandVal(0);
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    int Offset = TFL->getReturnAddressOffset(MF);

                             &SystemZ::GR64BitRegClass);

  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
                         LoadN->getBasePtr(), LoadN->getMemOperand());

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    if (Subtarget.hasHighWord()) {
                                       MVT::i64, SDValue(U64, 0), In);
                       DL, MVT::f32, Out64);
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
                                      MVT::f64, SDValue(U64, 0), In);
    if (Subtarget.hasHighWord())

    return lowerVASTART_XPLINK(Op, DAG);
  return lowerVASTART_ELF(Op, DAG);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const unsigned NumFields = 4;

  for (unsigned I = 0; I < NumFields; ++I) {
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,

  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
                       Align(8), false, false,

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  if (ExtraAlignSpace)

  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();

  if (ExtraAlignSpace) {

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),

  if (ExtraAlignSpace)

                   DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);

  if (RequiredAlign > StackAlign) {

    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(

  EVT VT = Op.getValueType();
                     Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

                     LL, RL, Ops[1], Ops[0]);

  EVT VT = Op.getValueType();
                     Op.getOperand(1), Ops[1], Ops[0]);
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

  EVT VT = Op.getValueType();
                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};

  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)

  if (!isInt<16>(Value))

                     MVT::i64, HighOp, Low32);
  if (N->getValueType(0) == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)

  MVT VT = N->getSimpleValueType(0);

  if (VT == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)

  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);

  if (NumSignificantBits == 0)

  BitSize = std::min(BitSize, OrigBitSize);

  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {

  if (BitSize != OrigBitSize)
  auto *Node = cast<AtomicSDNode>(Op.getNode());
  assert((Node->getMemoryVT() == MVT::i128 ||
          Node->getMemoryVT() == MVT::f128) &&
         "Only custom lowering i128 or f128.");

  EVT PtrVT = Addr.getValueType();
  EVT WideVT = MVT::i32;

                                                   unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)

  if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {

  SDValue AlignedAddr, BitShift, NegBitShift;

  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,

  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    assert(Subtarget.hasInterlockedAccess1() &&
           "Should have been expanded by AtomicExpand pass.");
                      Node->getChain(), Node->getBasePtr(), NegSrc2,
                      Node->getMemOperand());

  auto *Node = cast<AtomicSDNode>(Op.getNode());

  if (Node->getMemoryVT() == MVT::i128) {

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
                                           DL, Tys, Ops, NarrowVT, MMO);

  SDValue AlignedAddr, BitShift, NegBitShift;

  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                                           VTList, Ops, NarrowVT, MMO);

SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {
  if (auto *SI = dyn_cast<StoreInst>(&I))
  if (auto *LI = dyn_cast<LoadInst>(&I))
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
5015 "in GHC calling convention");
5017 Regs->getStackPointerRegister(),
Op.getValueType());
5028 "in GHC calling convention");
5035 if (StoreBackchain) {
5037 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
5038 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
5042 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
5045 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
5053 bool IsData =
Op.getConstantOperandVal(4);
5056 return Op.getOperand(0);
5059 bool IsWrite =
Op.getConstantOperandVal(2);
5061 auto *
Node = cast<MemIntrinsicSDNode>(
Op.getNode());
5065 Node->getVTList(), Ops,
5066 Node->getMemoryVT(),
Node->getMemOperand());
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  unsigned Opcode, CCValid;
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  unsigned Opcode, CCValid;
    if (Op->getNumValues() == 1)
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");

  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);

  case Intrinsic::s390_vpdi:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vperm:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vaq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vaccb:
  case Intrinsic::s390_vacch:
  case Intrinsic::s390_vaccf:
  case Intrinsic::s390_vaccg:
  case Intrinsic::s390_vaccq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vacq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vacccq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vsq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vscbib:
  case Intrinsic::s390_vscbih:
  case Intrinsic::s390_vscbif:
  case Intrinsic::s390_vscbig:
  case Intrinsic::s390_vscbiq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vsbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vsbcbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
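// Commentary on the table above: each row models one fixed byte permutation
// as a 16-entry map over a 32-byte index space, in which indices 0..15 name
// bytes of the first v16i8 operand and 16..31 bytes of the second. The
// interleaving rows (e.g. "0, 16, 1, 17, ...") correspond to merge-style
// instructions and the strided rows to pack/even-odd selections;
// matchPermute() below compares a shuffle's byte pattern against these rows
// to select a single instruction.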
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];

                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;

                   unsigned &OpNo0, unsigned &OpNo1) {

    int Elt = Bytes[From];
      Transform[From] = -1;
      while (P.Bytes[To] != Elt) {
      Transform[From] = To;

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;

                            unsigned BytesPerElement, int &Base) {
  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
      } else if (unsigned(Base) != Elem - I)

                             unsigned &StartIndex, unsigned &OpNo0,
  int OpNos[] = { -1, -1 };
  for (unsigned I = 0; I < 16; ++I) {
    int Index = Bytes[I];
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;

    N = N->getOperand(0);
  if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
    return Op->getZExtValue() == 0;

  for (unsigned I = 0; I < Num; I++)

  for (unsigned I = 0; I < 2; ++I)

  unsigned StartIndex, OpNo0, OpNo1;

  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
      if (OpNo == ZeroVecIdx && I == 0) {
      if (OpNo != ZeroVecIdx && Byte == 0) {
    if (ZeroIdx != -1) {
      if (Bytes[I] >= 0) {
        if (OpNo == ZeroVecIdx)
  SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
                     (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }
  unsigned UnpackFromEltSize;

void GeneralShuffle::addUndef() {
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
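// Commentary: GeneralShuffle lowers an arbitrary shuffle by flattening it
// into Bytes, a vector of byte indices using the same encoding as the static
// Permute table above (16 * OpNo + B for byte B of source operand OpNo, and
// -1 for undef). addUndef() appends one element's worth of -1 entries;
// add() below appends the byte positions of a real source element.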
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
  if (FromBytesPerElement < BytesPerElement)
            (FromBytesPerElement - BytesPerElement));
  while (Op.getNode()) {
      Op = Op.getOperand(0);
    } else if (Op.isUndef()) {

  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
  if (OpNo == Ops.size())

  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);

  if (Ops.size() == 0)

  tryPrepareForUnpack();

  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
        else if (OpNo == I + Stride)
          if (NewBytes[J] >= 0) {
                   "Invalid double permute");
            assert(NewBytesMap[J] < 0 && "Invalid double permute");
          if (NewBytes[J] >= 0)
    Ops[1] = Ops[Stride];

  unsigned OpNo0, OpNo1;
  if (unpackWasPrepared() && Ops[1].isUndef())
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
  Op = insertUnpackIfPrepared(DAG, DL, Op);

  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";

void GeneralShuffle::tryPrepareForUnpack() {
  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)

  if (Ops.size() > 2 &&

  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;
      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (Bytes[Elt] != -1) {
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;
    if (Ops.size() == 2) {
        if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
          UnpackFromEltSize = UINT_MAX;
  if (UnpackFromEltSize > 4)

  LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
                    << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
             dumpBytes(Bytes, "Original Bytes vector:"););

    Elt += UnpackFromEltSize;
    for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
      Bytes[B] = Bytes[Elt];

  Ops.erase(&Ops[ZeroVecOpNo]);
    if (Bytes[I] >= 0) {
      if (OpNo > ZeroVecOpNo)

  if (!unpackWasPrepared())
  unsigned InBits = UnpackFromEltSize * 8;
  unsigned OutBits = InBits * 2;

    if (!Op.getOperand(I).isUndef())

  if (Value.isUndef())
  GeneralShuffle GS(VT);

  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
      Op = Op.getOperand(0);
      unsigned Elem = Op.getConstantOperandVal(1);
      if (!GS.add(Op.getOperand(0), Elem))
    } else if (Op.isUndef()) {

  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {

  return GS.getNode(DAG, SDLoc(BVN));

bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
  if (auto *AL = dyn_cast<AtomicSDNode>(Op))

  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      else if (Elem != Single) {

  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))

  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {

  if (VT == MVT::v2i64 && !AllLoads)
  if (VT == MVT::v2f64 && !AllLoads)
  if (VT == MVT::v4f32 && !AllLoads) {
                       DL, MVT::v2i64, Op01, Op23);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {

  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)

  std::map<const SDNode*, unsigned> UseCounts;
  SDNode *LoadMaxUses = nullptr;
  for (unsigned I = 0; I < NumElements; ++I)
    if (isVectorElementLoad(Elems[I])) {
      SDNode *Ld = Elems[I].getNode();
      if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
  if (LoadMaxUses != nullptr) {
    ReplicatedVal = SDValue(LoadMaxUses, 0);

  unsigned I1 = NumElements / 2 - 1;
  unsigned I2 = NumElements - 1;
  bool Def1 = !Elems[I1].isUndef();
  bool Def2 = !Elems[I2].isUndef();

  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)

  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {

  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);

  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  EVT VT = Op.getValueType();

  if (VSN->isSplat()) {
    unsigned Index = VSN->getSplatIndex();
           "Splat index should be defined and in first operand");

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))
  return GS.getNode(DAG, SDLoc(VSN));

  EVT VT = Op.getValueType();
  if (VT == MVT::v2f64 &&
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
  EVT VT = Op.getValueType();
  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {

SDValue SystemZTargetLowering::
  EVT OutVT = Op.getValueType();
  } while (FromBits != ToBits);

SDValue SystemZTargetLowering::
  EVT OutVT = Op.getValueType();
  unsigned NumInPerOut = InNumElts / OutNumElts;

  unsigned ZeroVecElt = InNumElts;
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;
    Mask[MaskElt] = PackedElt;

                                          unsigned ByScalar) const {
  EVT VT = Op.getValueType();

  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;
    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      unsigned Index = VSN->getSplatIndex();
             "Splat index should be defined and in first operand");
        return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

  MVT DstVT = Op.getSimpleValueType();

  unsigned SrcAS = N->getSrcAddressSpace();
  assert(SrcAS != N->getDestAddressSpace() &&
         "addrspacecast must be between different address spaces");
  } else if (DstVT == MVT::i32) {

  MVT ResultVT = Op.getSimpleValueType();
  unsigned Check = Op.getConstantOperandVal(1);
  unsigned TDCMask = 0;

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
  return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);
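// Commentary: the switch below is the central custom-lowering dispatcher.
// Every opcode that the constructor registered as Custom for some type ends
// up in LowerOperation(), which fans out to the lower* helpers defined
// earlier in this file.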
  switch (Op.getOpcode()) {
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerBR_CC(Op, DAG);
    return lowerSELECT_CC(Op, DAG);
    return lowerSETCC(Op, DAG);
    return lowerSTRICT_FSETCC(Op, DAG, false);
    return lowerSTRICT_FSETCC(Op, DAG, true);
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
    return lowerBITCAST(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerVACOPY(Op, DAG);
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
    return lowerSMUL_LOHI(Op, DAG);
    return lowerUMUL_LOHI(Op, DAG);
    return lowerSDIVREM(Op, DAG);
    return lowerUDIVREM(Op, DAG);
    return lowerXALUO(Op, DAG);
    return lowerUADDSUBO_CARRY(Op, DAG);
    return lowerOR(Op, DAG);
    return lowerCTPOP(Op, DAG);
    return lowerVECREDUCE_ADD(Op, DAG);
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerATOMIC_LDST_I128(Op, DAG);
    return lowerATOMIC_LOAD_SUB(Op, DAG);
    return lowerATOMIC_CMP_SWAP(Op, DAG);
    return lowerSTACKSAVE(Op, DAG);
    return lowerSTACKRESTORE(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerIS_FPCLASS(Op, DAG);
    return lowerGET_ROUNDING(Op, DAG);
    return lowerREADCYCLECOUNTER(Op, DAG);

                               &SystemZ::FP128BitRegClass);
      SystemZ::REG_SEQUENCE, SL, MVT::f128,
                               &SystemZ::FP128BitRegClass);

  switch (N->getOpcode()) {
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
                                          DL, Tys, Ops, MVT::i128, MMO);
    if (N->getValueType(0) == MVT::f128)
    SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2) };
                                          DL, Tys, Ops, MVT::i128, MMO);
    if (cast<AtomicSDNode>(N)->getSuccessOrdering() ==
                                    MVT::Other, Res), 0);
    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                                          DL, Tys, Ops, MVT::i128, MMO);
    if (N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())

                                              DAGCombinerInfo &DCI,
  unsigned Opcode = Op.getOpcode();
    Op = Op.getOperand(0);
      canTreatAsByteVector(Op.getValueType())) {
                          BytesPerElement, First))
      if (Byte % BytesPerElement != 0)
      Index = Byte / BytesPerElement;
               canTreatAsByteVector(Op.getValueType())) {
      EVT OpVT = Op.getValueType();
      if (OpBytesPerElement < BytesPerElement)
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        DCI.AddToWorklist(Op.getNode());
        DCI.AddToWorklist(Op.getNode());
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
      Op = Op.getOperand(0);

  if (Op.getValueType() != VecVT) {
    DCI.AddToWorklist(Op.getNode());

SDValue SystemZTargetLowering::combineTruncateExtract(
  if (canTreatAsByteVector(VecVT)) {
    if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (BytesPerElement % TruncBytes == 0) {
        unsigned Scale = BytesPerElement / TruncBytes;
        unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
        EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
        return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
      DCI.CombineTo(N0.getNode(), TruncSelect);

SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
  auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
      unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
      unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;

SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  unsigned Opcode = N->getOpcode();
    if (Op1 == N->getOperand(0))
    if (ElemBytes <= 4) {
      DCI.AddToWorklist(Op1.getNode());
      DCI.AddToWorklist(Op.getNode());

  LoPart = HiPart = nullptr;
    if (Use.getResNo() != 0)
    bool IsLoPart = true;

  LoPart = HiPart = nullptr;
    if (Use.getResNo() != 0)
        User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
    switch (User->getConstantOperandVal(1)) {
    case SystemZ::subreg_l64:
    case SystemZ::subreg_h64:

SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT LdVT = N->getValueType(0);
  if (auto *LN = dyn_cast<LoadSDNode>(N)) {
    MVT LoadNodeVT = LN->getBasePtr().getSimpleValueType();
    if (PtrVT != LoadNodeVT) {
      return DAG.getExtLoad(LN->getExtensionType(), DL, LN->getValueType(0),
                            LN->getChain(), AddrSpaceCast, LN->getMemoryVT(),
                            LN->getMemOperand());

                   LD->getPointerInfo(), LD->getOriginalAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(HiPart, EltLoad, true);

                   LD->getPointerInfo().getWithOffset(8), LD->getOriginalAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(LoPart, EltLoad, true);

  DCI.AddToWorklist(Chain.getNode());

    } else if (Use.getResNo() == 0)
  if (!Replicate || OtherUses.empty())

  for (SDNode *U : OtherUses) {
bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 ||
        VT == MVT::i128)

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue;
    if ((unsigned) M[i] != NumElts - 1 - i)

  for (auto *U : StoredVal->users()) {
      EVT CurrMemVT = ST->getMemoryVT().getScalarType();
    } else if (isa<BuildVectorSDNode>(U)) {

SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();
    MVT StoreNodeVT = SN->getBasePtr().getSimpleValueType();
    if (PtrVT != StoreNodeVT) {
      return DAG.getStore(SN->getChain(), DL, SN->getValue(), AddrSpaceCast,
                          SN->getPointerInfo(), SN->getOriginalAlign(),
                          SN->getMemOperand()->getFlags(), SN->getAAInfo());

  if (MemVT.isInteger() && SN->isTruncatingStore()) {
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      N->getOperand(0), BSwapOp, N->getOperand(2)
                                   Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      Subtarget.hasVectorEnhancements2()) {
                                   Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      N->getOperand(0).reachesChainWithoutSideEffects(SDValue(Op1.getNode(), 1))) {
                                   Ops, MemVT, SN->getMemOperand());

        DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(),
                     SN->getPointerInfo(), SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());
                     SN->getPointerInfo().getWithOffset(8),
                     SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());

    if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() ||
    APInt Val = C->getAPIntValue();
      assert(SN->isTruncatingStore() &&
             "Non-truncating store and immediate value does not fit?");
      Val = Val.trunc(TotBytes * 8);

    if (VCI.isVectorConstantLegal(Subtarget) &&

    auto FindReplicatedReg = [&](SDValue MulOp) {
      EVT MulVT = MulOp.getValueType();
      if (MulOp->getOpcode() == ISD::MUL &&
          (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
          WordVT = LHS->getOperand(0).getValueType();
          WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT();
        if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
                              APInt(MulVT.getSizeInBits(), C->getZExtValue()));
          if (VCI.isVectorConstantLegal(Subtarget) &&
              WordVT == VCI.VecVT.getScalarType())

    if (isa<BuildVectorSDNode>(Op1) &&
      if (auto *C = dyn_cast<ConstantSDNode>(SplatVal))
        FindReplicatedReg(SplatVal);
      if (auto *C = dyn_cast<ConstantSDNode>(Op1))
        FindReplicatedReg(Op1);

           "Bad type handling");
                                   SN->getBasePtr(), SN->getMemOperand());
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
                                   Ops, LD->getMemoryVT(), LD->getMemOperand());
    DCI.CombineTo(N, ESLoad);
    DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));

SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  EVT VecVT = Op.getValueType();
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());

  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    if (canTreatAsByteVector(VecVT))
      return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                            IndexN->getZExtValue(), DCI, false);

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (N->getOperand(0) == N->getOperand(1))

  if (Chain1 == Chain2)

SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (!Subtarget.hasVector())

  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
  if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() &&
    for (auto *U : Vec->users()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 1) {
        if (OtherRound.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                          {MVT::v4f32, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VRound.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                             N->getVTList(), Extract0, Chain);
7711SDValue SystemZTargetLowering::combineFP_EXTEND(
7712 SDNode *
N, DAGCombinerInfo &DCI)
const {
7714 if (!Subtarget.hasVector())
7723 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7726 if (
N->getValueType(0) == MVT::f64 && Op0.
hasOneUse() &&
7732 for (
auto *U : Vec->
users()) {
7733 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7735 U->getOperand(0) == Vec &&
7737 U->getConstantOperandVal(1) == 2) {
7739 if (OtherExtend.
getOpcode() ==
N->getOpcode() &&
7743 if (
N->isStrictFPOpcode()) {
7748 {MVT::v2f64, MVT::Other}, {Chain, Vec});
7753 DCI.AddToWorklist(VExtend.
getNode());
7757 DCI.AddToWorklist(Extract1.
getNode());
7766 N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineINT_TO_FP(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();
  if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits &&
      OutScalarBits <= 64) {
    unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
    unsigned ExtOpcode =
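// Illustrative sketch (standalone C++): the combine above widens the integer
// vector to the FP element width before converting, which is value-preserving
// per element: (double)x equals (double)(int64_t)x for every int16_t x.
#include <cassert>
#include <cstdint>
int main() {
  for (int32_t I = -32768; I <= 32767; ++I) {
    int16_t X = (int16_t)I;
    assert((double)X == (double)(int64_t)X);  // extend-then-convert is exact
  }
  return 0;
}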
SDValue SystemZTargetLowering::combineBSWAP(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
                                  Ops, LD->getMemoryVT(), LD->getMemOperand());
    if (N->getValueType(0) == MVT::i16)
    DCI.CombineTo(N, ResVal);
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

      (canLoadStoreByteSwapped(N->getValueType(0)) &&
    EVT VecVT = N->getValueType(0);
    EVT EltVT = N->getValueType(0).getVectorElementType();
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());

  if (SV && Op.hasOneUse()) {
    EVT VecVT = N->getValueType(0);
    DCI.AddToWorklist(Op0.getNode());
    DCI.AddToWorklist(Op1.getNode());
    DCI.AddToWorklist(Op0.getNode());
    DCI.AddToWorklist(Op1.getNode());
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
    bool Invert = false;
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();

  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
    auto *SHL = CompareLHS->getOperand(0).getNode();
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    auto *IPM = SHL->getOperand(0).getNode();
    if (!CompareLHS->hasOneUse())
    if (CompareRHS->getZExtValue() != 0)
    CCReg = IPM->getOperand(0);
SDValue SystemZTargetLowering::combineBR_CCMASK(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
                       N->getOperand(3), CCReg);
SDValue SystemZTargetLowering::combineSELECT_CCMASK(SDNode *N,
                                                    DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
                       N->getOperand(0), N->getOperand(1),
SDValue SystemZTargetLowering::combineGET_CCMASK(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();
  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
    SelectCCMaskVal ^= SelectCCValidVal;
  if (SelectCCValidVal & ~CCValidVal)
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
  return Select->getOperand(4);
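// Illustrative sketch (standalone C++, hypothetical mask values): the legality
// checks above in plain mask arithmetic. A SELECT_CCMASK producing 0/1 can be
// folded into GET_CCMASK only if the select's valid bits are a subset of the
// user's valid bits and the two masks agree on those bits; swapping the 0/1
// operands flips the condition, which is CCMask ^ CCValid.
#include <cassert>
int main() {
  int CCValidVal = 0xE, CCMaskVal = 0xA;           // user of the CC value
  int SelectCCValidVal = 0xE, SelectCCMaskVal = 0x4;
  SelectCCMaskVal ^= SelectCCValidVal;             // select was (0, 1): invert
  assert((SelectCCValidVal & ~CCValidVal) == 0);   // subset of valid bits
  assert(SelectCCMaskVal == (CCMaskVal & SelectCCValidVal)); // masks agree
  return 0;
}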
SDValue SystemZTargetLowering::combineIntDIVREM(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);
SDValue SystemZTargetLowering::combineINTRINSIC(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  unsigned Id = N->getConstantOperandVal(1);
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
  return N->getOperand(0);
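// Illustrative sketch (standalone C++): why the threshold above is 15. The
// load/store-with-length operations access min(length + 1, 16) bytes, so a
// constant length operand >= 15 already covers the whole 16-byte vector and
// the intrinsic can be treated as a plain full-width access.
#include <algorithm>
#include <cassert>
static unsigned bytesAccessed(unsigned Len) { return std::min(Len + 1, 16u); }
int main() {
  assert(bytesAccessed(14) == 15);   // still a partial access
  assert(bytesAccessed(15) == 16);   // full vector from here on
  assert(bytesAccessed(100) == 16);
  return 0;
}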
  switch (N->getOpcode()) {
  case ISD::UREM:
    return combineIntDIVREM(N, DCI);
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    SrcDemE = DemandedElts;
    SrcDemE = SrcDemE.trunc(NumElts / 2);
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vpdi: {
    SrcDemE = APInt(NumElts, 0);
    if (!DemandedElts[OpNo - 1])
    unsigned Mask = Op.getConstantOperandVal(3);
    unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
    SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
  case Intrinsic::s390_vsldb: {
    assert(VT == MVT::v16i8 && "Unexpected type.");
    unsigned FirstIdx = Op.getConstantOperandVal(3);
    assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
    unsigned NumSrc0Els = 16 - FirstIdx;
    SrcDemE = APInt(NumElts, 0);
      APInt DemEls = DemandedElts.trunc(NumSrc0Els);
      APInt DemEls = DemandedElts.lshr(NumSrc0Els);
  case Intrinsic::s390_vperm:
    SrcDemE = APInt(1, 1);
    SrcDemE = DemandedElts;
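// Illustrative sketch (standalone C++ with 16-bit masks standing in for
// APInt): for a vsldb-style shift-double on v16i8 that takes bytes
// FirstIdx..15 from operand 0 and the remainder from operand 1, the demanded
// bytes of each source follow from truncating or right-shifting the
// demanded-result mask, mirroring the trunc/lshr calls above. Values are
// hypothetical.
#include <cassert>
#include <cstdint>
int main() {
  unsigned FirstIdx = 5;                // hypothetical immediate operand
  unsigned NumSrc0Els = 16 - FirstIdx;  // 11 result bytes come from operand 0
  uint16_t DemandedElts = 0x8421;       // hypothetical demanded result bytes
  uint16_t Dem0 = DemandedElts & ((1u << NumSrc0Els) - 1);  // "trunc"
  uint16_t Dem1 = DemandedElts >> NumSrc0Els;               // "lshr"
  // The two pieces partition the demanded set exactly.
  assert((Dem0 | (uint16_t)(Dem1 << NumSrc0Els)) == DemandedElts);
  return 0;
}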
                                     const APInt &DemandedElts,

                                                     const APInt &DemandedElts,
                                                     unsigned Depth) const {
  unsigned tmp0, tmp1;
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
         "KnownBits does not match VT in bitwidth");
         "DemandedElts does not match VT number of elements");
  unsigned Opcode = Op.getOpcode();
    bool IsLogical = false;
    unsigned Id = Op.getConstantOperandVal(0);
    case Intrinsic::s390_vpksh:
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs:
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
    case Intrinsic::s390_vuplhb:
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb:
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
    case Intrinsic::s390_vuphb:
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
  if (LHS == 1)
    return 1;
  if (RHS == 1)
    return 1;
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  if (SrcBitWidth > VTBits) {
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
                                                     unsigned Depth) const {
  if (Op.getResNo() != 0)
  unsigned Opcode = Op.getOpcode();
    unsigned Id = Op.getConstantOperandVal(0);
    case Intrinsic::s390_vpksh:
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs:
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
    case Intrinsic::s390_vuphb:
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
  EVT VT = Op.getValueType();
  switch (Op->getOpcode()) {

         "Unexpected stack alignment");
  unsigned StackProbeSize =
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
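// Illustrative sketch (standalone C++, hypothetical attribute value): the
// rounding above. The requested probe size is clamped down to a multiple of
// the stack alignment, and a zero request falls back to one alignment unit.
#include <cassert>
int main() {
  unsigned StackAlign = 8;
  unsigned StackProbeSize = 4093;        // hypothetical requested size
  StackProbeSize &= ~(StackAlign - 1);   // round down to 4088
  unsigned Result = StackProbeSize ? StackProbeSize : StackAlign;
  assert(Result == 4088);
  return 0;
}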
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);

    if (Succ->isLiveIn(SystemZ::CC))
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
  for (auto *MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
    if (auto It = RegRewriteTable.find(TrueReg); It != RegRewriteTable.end())
      TrueReg = It->second.first;
    if (auto It = RegRewriteTable.find(FalseReg); It != RegRewriteTable.end())
      FalseReg = It->second.second;
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
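// Illustrative sketch (standalone C++ with a plain map, not MachineIR): why
// the rewrite table above exists. When several Select pseudos are lowered into
// one branch diamond, a later select may use an earlier select's result; on
// each incoming path that use must be replaced by the value the earlier PHI
// would carry there. Register numbers below are hypothetical.
#include <cassert>
#include <map>
#include <utility>
int main() {
  std::map<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
  RegRewriteTable[10] = {1, 2};          // select %10 = (true: %1, false: %2)
  unsigned TrueReg = 10, FalseReg = 3;   // second select uses %10 directly
  if (auto It = RegRewriteTable.find(TrueReg); It != RegRewriteTable.end())
    TrueReg = It->second.first;          // on the true path %10 is %1
  if (auto It = RegRewriteTable.find(FalseReg); It != RegRewriteTable.end())
    FalseReg = It->second.second;        // %3 is not a select result: unchanged
  assert(TrueReg == 1 && FalseReg == 3);
  return 0;
}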
  assert(TFL->hasReservedCallFrame(MF) &&
         "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
  uint32_t NumBytes = MI.getOperand(0).getImm();
  MI.eraseFromParent();
  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
      assert(NextMI.getOperand(3).getImm() == CCValid &&
             "Bad CCValid operands since CC was not redefined.");
      if (NextMI.getOperand(4).getImm() == CCMask ||
          NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
      if (NextMI.definesRegister(SystemZ::CC, nullptr) ||
          NextMI.usesCustomInsertionHook())
      for (auto *SelMI : Selects)
        if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
    if (NextMI.isDebugInstr()) {
      assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
    } else if (User || ++Count > 20)
  bool CCKilled = (LastMI->killsRegister(SystemZ::CC, nullptr) ||
  for (auto *SelMI : Selects)
    SelMI->eraseFromParent();
  for (auto *DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);
                                                        unsigned StoreOpcode,
                                                        unsigned STOCOpcode,
                                                        bool Invert) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
  for (auto *I : MI.memoperands())
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    MI.eraseFromParent();
  if (!MI.killsRegister(SystemZ::CC, nullptr) &&
  MI.eraseFromParent();
  int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
  Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
  MI.eraseFromParent();
                                                          bool Invert) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
    Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  } else if (BinOpcode)
  MI.eraseFromParent();
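// Illustrative sketch (portable C++, not the MachineIR expansion emitted
// above): the shape of the loop being built. A subword atomic read-modify-
// write is performed with a 32-bit compare-and-swap on the containing word,
// retrying until no other update intervenes; the rotations that position the
// subword within the word are omitted here.
#include <atomic>
#include <cassert>
#include <cstdint>
static uint32_t atomicOr(std::atomic<uint32_t> &Word, uint32_t Operand) {
  uint32_t OldVal = Word.load();            // L: load the containing word
  uint32_t NewVal;
  do {
    NewVal = OldVal | Operand;              // apply BinOpcode to the old value
  } while (!Word.compare_exchange_weak(OldVal, NewVal)); // CS: retry on clash
  return OldVal;                            // the pseudo yields the old value
}
int main() {
  std::atomic<uint32_t> Word{0x00FF0000};
  assert(atomicOr(Word, 0x0000AA00) == 0x00FF0000);
  assert(Word.load() == 0x00FFAA00);
  return 0;
}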
                                                        unsigned KeepOldMask) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  MI.eraseFromParent();
  int64_t Disp = MI.getOperand(2).getImm();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);
  if (!MI.registerDefIsDead(SystemZ::CC, nullptr))
  MI.eraseFromParent();
      .add(MI.getOperand(1))
      .addImm(SystemZ::subreg_h64)
      .add(MI.getOperand(2))
      .addImm(SystemZ::subreg_l64);
  MI.eraseFromParent();
                                                 bool ClearEven) const {
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
  MI.eraseFromParent();
                                                          unsigned Opcode,
                                                          bool IsMemset) const {
  uint64_t DestDisp = MI.getOperand(1).getImm();
    if (!isUInt<12>(Disp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
    SrcDisp = MI.getOperand(3).getImm();
    SrcDisp = DestDisp++;
    foldDisplIfNeeded(DestBase, DestDisp);
  bool IsImmForm = LengthMO.isImm();
  bool IsRegForm = !IsImmForm;
                          unsigned Length) -> void {
  bool NeedsLoop = false;
  Register LenAdjReg = SystemZ::NoRegister;
    ImmLength = LengthMO.getImm();
    ImmLength += IsMemset ? 2 : 1;
    if (ImmLength == 0) {
      MI.eraseFromParent();
    if (Opcode == SystemZ::CLC) {
      if (ImmLength > 3 * 256)
    } else if (ImmLength > 6 * 256)
    LenAdjReg = LengthMO.getReg();
      (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
          MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
      TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
    DestBase = loadZeroAddress();
  if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
    SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
        (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));
    Register ThisSrcReg = MRI.createVirtualRegister(RC);
        (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
    Register NextSrcReg = MRI.createVirtualRegister(RC);
        (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);
      MBB = MemsetOneCheckMBB;
    if (EndMBB && !ImmLength)
    if (!HaveSingleBase)
    if (Opcode == SystemZ::MVC)
    if (!HaveSingleBase)
    Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    Register RemDestReg = HaveSingleBase
        ? RemSrcReg
        : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    if (!HaveSingleBase)
    if (Opcode != SystemZ::MVC) {
  while (ImmLength > 0) {
    foldDisplIfNeeded(DestBase, DestDisp);
    foldDisplIfNeeded(SrcBase, SrcDisp);
    insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
  if (EndMBB && ImmLength > 0) {
  MI.eraseFromParent();
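// Illustrative sketch (standalone C++): the constant-length decomposition at
// the end of the function above. An MVC-style instruction moves at most 256
// bytes, so a known length is lowered to a sequence of chunks with advancing
// displacements; memcpy stands in for one emitted MVC here.
#include <cassert>
#include <cstring>
int main() {
  char Src[600], Dest[600];
  std::memset(Src, 7, sizeof(Src));
  unsigned DestDisp = 0, SrcDisp = 0, ImmLength = 600;
  while (ImmLength > 0) {
    unsigned ThisLength = ImmLength < 256 ? ImmLength : 256;
    std::memcpy(Dest + DestDisp, Src + SrcDisp, ThisLength);  // one MVC
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
  }
  assert(std::memcmp(Dest, Src, sizeof(Src)) == 0);
  return 0;
}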
  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  MI.eraseFromParent();
                                                          bool NoFloat) const {
  MI.setDesc(TII->get(Opcode));
    uint64_t Control = MI.getOperand(2).getImm();
    static const unsigned GPRControlBit[16] = {
        0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
        0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
    };
      Control |= GPRControlBit[15];
      Control |= GPRControlBit[11];
    MI.getOperand(2).setImm(Control);
    for (int I = 0; I < 16; I++) {
      if ((Control & GPRControlBit[I]) == 0) {
    if (!NoFloat && (Control & 4) != 0) {
      if (Subtarget.hasVector()) {
  MI.eraseFromParent();
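// Illustrative sketch (standalone C++): the clobber bookkeeping above. Each
// TBEGIN control bit covers a pair of GPRs, so the table maps register index
// I to the save bit of the pair containing it; a register survives the
// transaction only when its pair's bit is set in the control mask.
#include <cassert>
int main() {
  static const unsigned GPRControlBit[16] = {
      0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
      0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100};
  unsigned Control = 0;
  Control |= GPRControlBit[15];                // keep the r14/r15 pair
  Control |= GPRControlBit[11];                // keep the r10/r11 pair
  assert((Control & GPRControlBit[14]) != 0);  // r14 shares r15's bit
  assert((Control & GPRControlBit[0]) == 0);   // r0/r1 not preserved
  return 0;
}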
  Register SizeReg = MI.getOperand(2).getReg();
  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  MI.eraseFromParent();
SDValue SystemZTargetLowering::

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    return emitAdjCallStack(MI, MBB);

  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::SCmp128Hi:
    return emitICmp128Hi(MI, MBB, false);
  case SystemZ::UCmp128Hi:
    return emitICmp128Hi(MI, MBB, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0);
  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);
  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);
  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);
  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);

  case SystemZ::ATOMIC_LOADW_MIN:
  case SystemZ::ATOMIC_LOADW_MAX:
  case SystemZ::ATOMIC_LOADW_UMIN:
  case SystemZ::ATOMIC_LOADW_UMAX:

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCImm:
  case SystemZ::MVCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCImm:
  case SystemZ::XCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCImm:
  case SystemZ::CLCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::MemsetImmImm:
  case SystemZ::MemsetImmReg:
  case SystemZ::MemsetRegImm:
  case SystemZ::MemsetRegReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);
  case SystemZ::EH_SjLj_SetJmp:
  case SystemZ::EH_SjLj_LongJmp:

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;

      DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);

  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  EVT OpVT = Op.getValueType();
  assert(OpVT.isVector() && "Operand type for VECREDUCE_ADD is not a vector.");
bool SystemZTargetLowering::isFullyInternal(const Function *Fn) const {
    if (auto *CB = dyn_cast<CallBase>(U)) {
      if (CB->getCalledFunction() != Fn)

  if (Attrs.hasRetAttrs())
  OS << *F->getReturnType() << " @" << F->getName() << "(";
  for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) {
    OS << *FT->getParamType(I);
    for (auto A : {Attribute::SExt, Attribute::ZExt, Attribute::NoExt})

void SystemZTargetLowering::
  bool IsInternal = false;
  const Function *CalleeFn = nullptr;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    if ((CalleeFn = dyn_cast<Function>(G->getGlobal())))
      IsInternal = isFullyInternal(CalleeFn);
  if (!verifyNarrowIntegerArgs(Outs, IsInternal)) {
    errs() << "ERROR: Missing extension attribute of passed "
           << "value in call to function:\n" << "Callee: ";
    if (CalleeFn != nullptr)
    errs() << "Caller: ";

void SystemZTargetLowering::
  if (!verifyNarrowIntegerArgs(Outs, isFullyInternal(F))) {
    errs() << "ERROR: Missing extension attribute of returned "
           << "value from function:\n";

bool SystemZTargetLowering::
                                  bool IsInternal) const {
  for (unsigned i = 0; i < Outs.size(); ++i) {
    MVT VT = Outs[i].VT;
           "Unexpected integer argument VT.");
    if (VT == MVT::i32 &&
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void printFunctionArgExts(const Function *F, raw_fd_ostream &OS)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static cl::opt< bool > EnableIntArgExtCheck("argext-abi-check", cl::init(false), cl::desc("Verify that narrow int args are properly extended per the " "SystemZ ABI."))
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue lowerAddrSpaceCast(SDValue Op, SelectionDAG &DAG)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
bool hasAttribute(Attribute::AttrKind Kind) const
Return true if the attribute exists in this set.
StringRef getValueAsString() const
Return the attribute's value as a string.
static StringRef getNameFromAttrKind(Attribute::AttrKind AttrKind)
LLVM Basic Block Representation.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
iterator find(const_arg_type_t< KeyT > Val)
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasLocalLinkage() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
void setMachineBlockAddressTaken()
Set this block to indicate that its address is used as something other than the target of a terminato...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setReturnAddressIsTaken(bool s)
MachineFunctionProperties & reset(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
iterator_range< user_iterator > users()
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, Register Reg, SDValue N)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
bool isConstantIntBuildVectorOrConstantInt(SDValue N, bool AllowOpaques=true) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getRegister(Register Reg, EVT VT)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, Register Reg, EVT VT)
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getSignedTargetConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSignedConstant(int64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing special-use registers particular to calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset, memcpy, and memmove lowering.
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail call position.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG.
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context, const Type *RetTy) const override
This hook should be implemented to check whether the return values described by the Outs array can fit into the return registers.
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal icmp immediate, that is, the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an EH pad.
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target has registered for combining via setTargetDAGCombine.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal add immediate, that is, the target has add instructions which can add a register with the immediate without having to materialize the immediate into a register.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was registered to use 'custom' lowering for that result type.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false, never undef.
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in the Known bitset.
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers, particular to z/OS in 64-bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
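This is the idiom the SystemZTargetLowering constructor shown earlier leans on; an illustrative sketch (the opcode/type/action pairings here are examples, not SystemZ's actual table):
// Expand i64 signed division into a legalizer-generated sequence.
setOperationAction(ISD::SDIV, MVT::i64, Expand);
// Route f128 conversions through LowerOperation() for hand-written code.
setOperationAction(ISD::FP_TO_SINT, MVT::f128, Custom);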
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
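Together with setTruncStoreAction above, this shapes the legal memory forms; an illustrative sketch (actions chosen for the example, not SystemZ's real table):
// Sign-extending loads from i1 must be promoted to a wider memory type.
for (MVT VT : MVT::integer_valuetypes())
  setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
// Truncating f64 -> f32 stores must be expanded into explicit rounding.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);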
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
int getNumOccurrences() const
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
A raw_ostream that writes to a file descriptor.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory with one type and loaded from the same address with the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the specified vector type.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns the current rounding mode: -1 Undefined, 0 Round to 0, 1 Round to nearest (ties to even), 2 Round to +inf, 3 Round to -inf, 4 Round to nearest (ties to zero).
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN containing the high bits of the result.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) with the condition code in operand #4.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low lanes of an integer vector.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the result VT.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the result VT.
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable elements.
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
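A sketch of both helpers applied to a concrete condition code:
ISD::CondCode CC = ISD::SETULT;
ISD::CondCode Swapped = ISD::getSetCCSwappedOperands(CC); // SETUGT
ISD::CondCode Inverse = ISD::getSetCCInverse(CC, MVT::i64); // SETUGE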
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when considering SETFALSE (something that never exists dynamically) as 0.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert ‘Bytes’ to a hex string and output to ‘OS’.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant bit, stopping at the first 1.
int countl_zero(T Val)
Count the number of 0's from the most significant bit to the least significant bit, stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
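A worked sketch of these MathExtras helpers (together with isUIntN above) on a candidate displacement value:
uint64_t V = 0x1000;
bool FitsU12 = isUIntN(12, V);            // false: 0x1000 needs 13 bits
bool Pow2 = isPowerOf2_32(uint32_t(V));   // true
int TrailZ = countr_zero(V);              // 12
int LeadZ = countl_zero(V);               // 51 for a 64-bit value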
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
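For example, a 20-bit field (the width SystemZ uses for long-displacement encodings) can be widened like this; RawField is an assumed encoded value:
// Keep the low 20 bits, then replicate bit 19 into bits 20..63.
int64_t Disp = SignExtend64<20>(RawField & 0xFFFFF);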
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
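A small sketch of tracking bits across an extension (OtherKnown is an assumed KnownBits computed on another path):
KnownBits Known(32);
Known.Zero.setHighBits(16);          // upper half known to be zero
KnownBits Wide = Known.zext(64);     // zeros extend with the value
KnownBits Merged = Wide.intersectWith(OtherKnown); // keep common facts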
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
const uint32_t * getNoPreservedMask() const override
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*vscale.
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})