#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"
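// Command-line flag for the narrow-argument extension check performed by
// verifyNarrowIntegerArgs_Call/_Ret below.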
    cl::desc("Verify that narrow int args are properly extended per the "
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
  if (Subtarget.hasHighWord())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVector())
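  // Set DAG operation actions for each scalar type the target supports,
  // one MVT at a time.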
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE; ++I) {
  if (Subtarget.hasPopulationCount())
  if (!Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
                     {MVT::i8, MVT::i16, MVT::i32}, Legal);
                     {MVT::i8, MVT::i16}, Legal);
  if (!Subtarget.hasFPExtension()) {
  if (Subtarget.hasMiscellaneousExtensions3()) {
    if (VT != MVT::v2i64)
    if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements2()) {
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1()) {
  for (auto VT : {MVT::f32, MVT::f64, MVT::f128, MVT::v4f32, MVT::v2f64}) {
  if (!Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVectorEnhancements1()) {
  if (!Subtarget.hasVector()) {
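  // Register the z/OS runtime library call names: ZOSLibcallNames.def
  // expands HANDLE_LIBCALL into one (RTLIB code, external name) entry
  // per libcall.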
  struct RTLibCallMapping {
    RTLIB::Libcall Code;
    const char *Name;
  };
  static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
  };
  for (auto &E : RTLibCallCommon)
    setLibcallName(E.Code, E.Name);
  return Subtarget.hasSoftFloat();
  return Subtarget.hasVectorEnhancements1();
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
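  // Try to use VECTOR REPLICATE IMMEDIATE for a 16-bit signed splat value,
  // or VECTOR GENERATE MASK when the splat is a contiguous run of ones.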
  if (SplatBitSize > 64)
  if (isInt<16>(SignedValue)) {
  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
  unsigned HalfSize = Width / 2;
  if (HighValue != LowValue || 8 > HalfSize)
  SplatBits = HighValue;
  SplatBitSize = Width;
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                                         bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())
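// Custom inserter for the EH_SjLj_SetJmp pseudo: store the resume label,
// frame pointer, stack pointer and (if used) backchain into the jump
// buffer, then split the block into main and restore paths.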
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
  const int64_t FPOffset = 0;
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
          .addReg(SpecialRegs->getFramePointerRegister())
          .addReg(SpecialRegs->getStackPointerRegister())
  Register BCReg = MRI.createVirtualRegister(PtrRC);
          .addReg(SpecialRegs->getStackPointerRegister())
          .addImm(TFL->getBackchainOffset(*MF))
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::EH_SjLj_Setup))
  MI.eraseFromParent();
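// Custom inserter for the EH_SjLj_LongJmp pseudo: reload the frame
// pointer, stack pointer and (if used) backchain from the jump buffer and
// branch to the saved label.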
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
  const int64_t FPOffset = 0;
                   SpecialRegs->getFramePointerRegister())
                   SpecialRegs->getStackPointerRegister())
          .addReg(SpecialRegs->getStackPointerRegister())
          .addImm(TFL->getBackchainOffset(*MF))
  MI.eraseFromParent();
  if (Subtarget.hasInterlockedAccess1() &&
  return isInt<32>(Imm) || isUInt<32>(Imm);
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
        LongDisplacement(LongDispl), IndexReg(IdxReg) {}
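// Decide which addressing-mode features (long displacement, index
// register) the instructions selected for I are expected to support.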
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType()
                                          : I->getOperand(0)->getType());
    bool IsVectorAccess = MemAccessTy->isVectorTy();
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    if (IsFPAccess || IsVectorAccess)
  return AM.Scale == 0;
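// Prefer the inline MVC/XC sequences for small memcpy/memset operations
// instead of expanding them into individual loads and stores.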
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;
  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())
                                                  SrcAS, FuncAttributes);
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
  return FromBits > ToBits;
  return FromBits > ToBits;
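// Inline assembly support. Classify the single-letter constraints and the
// two-letter 'Z' memory constraints used by the SystemZ backend.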
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {
                                                const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (Subtarget.hasVector())
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
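// Parse a "{%rNN}"-style explicit register constraint and map the register
// number onto a physical register from Map, or fail with a null class.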
static std::pair<unsigned, const TargetRegisterClass *>
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end() - 1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
      return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      if (Subtarget.hasVector()) {
        return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
  auto getVTSizeInBits = [&VT]() {
    if (Constraint[1] == 'r') {
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)
    if (Constraint[1] == 'f') {
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)
    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 64)
                                      : SystemZ::NoRegister)
          Subtarget.isTargetELF() ? SystemZ::R15D : SystemZ::NoRegister)
          .Default(SystemZ::NoRegister);
    const Constant *PersonalityFn) const {
    const Constant *PersonalityFn) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'I': // Unsigned 8-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'J': // Unsigned 12-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'K': // Signed 16-bit constant
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'L': // Signed 20-bit displacement
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
          Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;

    case 'M': // 0x7fffffff
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
          Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                              Op.getValueType()));
      return;
    }
  }
#include "SystemZGenCallingConv.inc"
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                                   Type *ToType) const {
  if (BitCastToType == MVT::v2i64)
                               MVT::Untyped, Hi, Lo);
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 &&
      PartVT == MVT::Untyped) {
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 &&
      PartVT == MVT::Untyped) {
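// LowerFormalArguments: assign each incoming argument to a register class
// or a stack slot, counting the fixed GPRs and FPRs used so the varargs
// register save area can be set up afterwards.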
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;
    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();
    int64_t RegSaveOffset =
                                     &SystemZ::FP64BitRegClass);
    MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
                          unsigned Offset, bool LoadAdr = false) {
  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
  unsigned ADADelta = 0;
  unsigned EPADelta = 8;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    verifyNarrowIntegerArgs_Call(Outs, &MF.getFunction(), Callee);
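  // Arguments split by the type legalizer show up here as consecutive Outs
  // entries sharing one OrigArgIndex; they are reassembled in a stack slot.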
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    unsigned ArgIndex = Outs[I].OrigArgIndex;
    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
      SlotVT = Outs[I].VT;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
                SlotVT.getStoreSize()) &&
               "Not enough space for argument part!");
      ArgValue = SpillSlot;
  if (!StackPtr.getNode())
  RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
  if (!MemOpChains.empty())
                         ->getAddressOfCalleeRegister();
    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
                                      VA.getLocVT(), Glue);
                             bool DoesNotReturn, bool IsReturnValueUsed) const {
  Args.reserve(Ops.size());
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsZExt = !Entry.IsSExt;
    Args.push_back(Entry);
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
    verifyNarrowIntegerArgs_Ret(Outs, &MF.getFunction());
  if (RetLocs.empty())
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                                    unsigned &CCValid) {
  unsigned Id = Op.getConstantOperandVal(1);
  case Intrinsic::s390_tbegin:
  case Intrinsic::s390_tbegin_nofloat:
  case Intrinsic::s390_tend:
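// CC-producing vector intrinsics: each group of cases below maps to a
// single SystemZISD opcode whose extra result is the condition code.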
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
  case Intrinsic::s390_vtm:
  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
  case Intrinsic::s390_tdc:
  for (unsigned I = 2; I < NumOps; ++I)
  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  return Intr.getNode();
  for (unsigned I = 1; I < NumOps; ++I)
  return Intr.getNode();
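// Map an ISD condition code to the mask of condition-code values for which
// the comparison is true; the SETU* forms also include the unordered bit.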
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
  int64_t Value = ConstOp1->getSExtValue();
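// If C compares the result of a narrow (8- or 16-bit) extending load
// against an in-range constant, rewrite it to use the memory form of the
// comparison on the unextended value.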
  if (!C.Op0.hasOneUse() ||
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
  uint64_t Mask = (1 << NumBits) - 1;
  int64_t SignedValue = ConstOp1->getSExtValue();
  } else if (NumBits == 8) {
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                             Load->getBasePtr(), Load->getPointerInfo(),
                             Load->getMemoryVT(), Load->getAlign(),
                             Load->getMemOperand()->getFlags());
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load->getMemoryVT() == MVT::i8)
  switch (Load->getExtensionType()) {
  if (C.Op0.getValueType() == MVT::i128)
  if (C.Op0.getValueType() == MVT::f128)
  if (isa<ConstantFPSDNode>(C.Op1))
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))
  unsigned Opcode0 = C.Op0.getOpcode();
      C.Op0.getConstantOperandVal(1) == 0xffffffff)
      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
  if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
          cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
      C.Op0 = C.Op0.getOperand(0);
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
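// Return the condition-code mask that a TEST UNDER MASK instruction with
// the given mask would produce for this comparison, or 0 if none applies.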
                                     unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {
  if (CmpVal == Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
  if (C.Op0.getValueType() == MVT::i128) {
    auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
    if (Mask && Mask->getAPIntValue() == 0) {
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  uint64_t CmpVal = ConstOp1->getZExtValue();
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    MaskVal = Mask->getZExtValue();
    if (NewC.Op0.getValueType() != MVT::i64 ||
    MaskVal = -(CmpVal & -CmpVal);
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                               MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
      NewC.Op0.getOpcode() == ISD::SRL &&
      (MaskVal << ShiftVal != 0) &&
      ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                               MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  if (Mask && Mask->getZExtValue() == MaskVal)
  C.CCMask = NewCCMask;
  if (C.Op0.getValueType() != MVT::i128)
  bool Swap = false, Invert = false;
  C.CCMask ^= C.CCValid;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask || Mask->getValueSizeInBits(0) > 64)
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
  C.Op0 = C.Op0.getOperand(0);
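  // Turn the SETCC on the intrinsic's CC result into a CC mask: condition
  // codes run from 0 to 3, so CC value n corresponds to bit (3 - n).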
  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
                      bool IsSignaling = false) {
  unsigned Opcode, CCValid;
  Comparison C(CmpOp0, CmpOp1, Chain);
  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)
    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {
    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
                        unsigned CCValid, unsigned CCMask) {
    case CmpMode::Int:         return 0;
    case CmpMode::FP:          return 0;
    case CmpMode::StrictFP:    return 0;
    case CmpMode::SignalingFP: return 0;
  int Mask[] = { Start, -1, Start + 1, -1 };
      !Subtarget.hasVectorEnhancements1()) {
  SDValue Ops[2] = { Res, NewChain };
    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
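// Lower a vector comparison, choosing between integer, regular FP, strict
// FP and signaling FP modes, and falling back on a swapped or inverted
// condition when only the converse compare exists.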
                                           bool IsSignaling) const {
  assert(!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;
    assert(IsFP && "Unexpected integer comparison");
                            DL, VT, CmpOp1, CmpOp0, Chain);
                            DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GE.getValue(1));
    assert(IsFP && "Unexpected integer comparison");
                            DL, VT, CmpOp1, CmpOp0, Chain);
                            DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GT.getValue(1));
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
    Chain = Cmp.getValue(1);
  if (Chain && Chain.getNode() != Cmp.getNode()) {
  EVT VT = Op.getValueType();
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
                                           bool IsSignaling) const {
  EVT VT = Op.getNode()->getValueType(0);
  SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                 Chain, IsSignaling);
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
  SDValue Ops[] = {TrueOp, FalseOp,
  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
                            Node->getValueType(0),
  assert(Mask && "Missing call preserved mask for calling convention");
  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
  SDValue TP = lowerThreadPointer(DL, DAG);