#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"

      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
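// Comparison (as the initializer list above suggests) bundles everything the
// compare-lowering helpers below work with: the two operands being compared,
// an optional chain used for strict FP compares, and the opcode, integer
// compare type and condition-code valid/mask values that get filled in later.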
  if (SplatBitSize > 64)

  OpVals.push_back(((unsigned) SignedValue));

  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {

    OpVals.push_back(Start - (64 - SplatBitSize));
    OpVals.push_back(End - (64 - SplatBitSize));

  if (tryValue(SplatBitsZ | Upper | Lower))

  return tryValue(SplatBitsZ | Middle);

  unsigned HalfSize = Width / 2;

  if (HighValue != LowValue || 8 > HalfSize)

  SplatBits = HighValue;

  SplatBitSize = Width;

  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,

  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
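// These fragments belong to the vector-constant analysis (presumably
// SystemZVectorConstantInfo): the BUILD_VECTOR splat is queried both as a
// single 128-bit value and element-wise, and tryValue()/isRxSBGMask() look
// for an immediate encoding (replicate-immediate or generate-mask) that
// avoids a constant-pool load.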
                                       bool ForCodeSize) const {

  if (Imm.isZero() || Imm.isNegZero())

      LongDisplacement(LongDispl), IndexReg(IdxReg) {}

  switch (II->getIntrinsicID()) {

  case Intrinsic::memset:
  case Intrinsic::memmove:

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&

      } else if (isa<StoreInst>(SingleUser))

  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());

    bool IsVectorAccess = MemAccessTy->isVectorTy();

    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;

    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;

    if (IsFPAccess || IsVectorAccess)

  return AM.Scale == 0;
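// SystemZ addressing modes are base + 12/20-bit displacement with an optional
// index register but no scale factor, which is why the legality check above
// only accepts AM.Scale == 0.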
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,

  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {

    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)

    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)

    if (Op.isZeroMemset())

                                                   SrcAS, FuncAttributes);

  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedSize();

  return FromBits > ToBits;

  return FromBits > ToBits;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {

                                                  const char *constraint) const {

  Value *CallOperandVal = info.CallOperandVal;

  if (!CallOperandVal)

  switch (*constraint) {

    if (type->isFloatingPointTy())

    if ((type->isVectorTy() || type->isFloatingPointTy()) &&

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
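// The immediate checks above appear to correspond to the SystemZ inline-asm
// constant constraints: 'J' accepts an unsigned 12-bit value, 'L' a signed
// 20-bit value, and 'M' only 0x7fffffff, with the remaining ConstantInt
// branches covering the other immediate letters ('I', 'K').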
static std::pair<unsigned, const TargetRegisterClass *>

                        const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {

    return std::make_pair(Map[Index], RC);

  return std::make_pair(0U, nullptr);

std::pair<unsigned, const TargetRegisterClass *>

  if (Constraint.size() == 1) {

    switch (Constraint[0]) {

      return std::make_pair(0U, &SystemZ::GR64BitRegClass);

      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

      return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);

      return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

      return std::make_pair(0U, &SystemZ::FP64BitRegClass);

      return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

      return std::make_pair(0U, &SystemZ::VR32BitRegClass);

      return std::make_pair(0U, &SystemZ::VR64BitRegClass);
      return std::make_pair(0U, &SystemZ::VR128BitRegClass);

  if (Constraint.size() > 0 && Constraint[0] == '{') {

    if (Constraint[1] == 'r') {

    if (Constraint[1] == 'f') {

      return std::make_pair(

    if (Constraint[1] == 'v') {

      return std::make_pair(
                                                     std::vector<SDValue> &Ops,

  if (Constraint.length() == 1) {
    switch (Constraint[0]) {

      if (auto *C = dyn_cast<ConstantSDNode>(Op))

                                    Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))

                                      Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))

                                    Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))

                                      Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)

                                      Op.getValueType()));

#include "SystemZGenCallingConv.inc"
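// SystemZGenCallingConv.inc is TableGen output (from SystemZCallingConv.td);
// it supplies the calling-convention functions such as RetCC_SystemZ that the
// argument, call and return lowering below use to assign value locations.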
static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,

                                           Type *ToType) const {

  for (unsigned i = 0; i < Ins.size(); ++i)

  for (unsigned i = 0; i < Outs.size(); ++i)

           (NumParts == 2 && PartVT == MVT::i64))) &&
         "Unknown handling of i128 value.");
  if (ValueVT == MVT::i128 && NumParts == 1) {

           (NumParts == 2 && PartVT == MVT::i64))) &&
         "Unknown handling of i128 value.");
  if (ValueVT == MVT::i128 && NumParts == 1)

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

      RC = &SystemZ::GR32BitRegClass;

      RC = &SystemZ::GR64BitRegClass;

      RC = &SystemZ::FP32BitRegClass;

      RC = &SystemZ::FP64BitRegClass;

      RC = &SystemZ::FP128BitRegClass;

      RC = &SystemZ::VR128BitRegClass;

      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,

    unsigned ArgIndex = Ins[I].OrigArgIndex;

    while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {

      unsigned PartOffset = Ins[I + 1].PartOffset;

    int64_t RegSaveOffset =

                                         &SystemZ::FP64BitRegClass);

  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)

    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    unsigned ArgIndex = Outs[I].OrigArgIndex;

    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {

      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;

        SlotVT = Outs[I].ArgVT;

      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      MemOpChains.push_back(

      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;

        MemOpChains.push_back(

                        SlotVT.getStoreSize()) && "Not enough space for argument part!");

      ArgValue = SpillSlot;

      RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));

      if (!StackPtr.getNode())

      MemOpChains.push_back(

    RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));

  if (!MemOpChains.empty())

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {

  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {

  } else if (IsTailCall) {

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {

                                  RegsToPass[I].second, Glue);

  Ops.push_back(Chain);

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)

                                   RegsToPass[I].second.getValueType()));

  assert(Mask && "Missing call preserved mask for calling convention");

  Ops.push_back(Glue);

  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);

  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {

                                  bool DoesNotReturn, bool IsReturnValueUsed) const {

  Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());

  Args.push_back(Entry);

  for (auto &Out : Outs)

  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);

  if (RetLocs.empty())

  RetOps.push_back(Chain);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {

  RetOps.push_back(Glue);
                                     unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  case Intrinsic::s390_tbegin:

  case Intrinsic::s390_tbegin_nofloat:

  case Intrinsic::s390_tend:

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:

  case Intrinsic::s390_vtm:

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:

  case Intrinsic::s390_tdc:
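// Every intrinsic listed above is a CC-setting variant (most carry a trailing
// 's' in the name); the surrounding helper maps each one to its SystemZISD
// opcode together with a CCValid mask describing which of the four condition
// codes the underlying instruction can produce.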
  unsigned NumOps = Op.getNumOperands();

  Ops.push_back(Op.getOperand(0));
  for (unsigned I = 2; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

  return Intr.getNode();

  unsigned NumOps = Op.getNumOperands();

  for (unsigned I = 1; I < NumOps; ++I)
    Ops.push_back(Op.getOperand(I));

  return Intr.getNode();

  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
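// For illustration: instantiated with X = GT, the three macro cases above
// expand to (the macro itself is presumably the CONV(X) helper used when
// translating ISD condition codes into CC masks):
//   case ISD::SETGT:  return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETOGT: return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETUGT: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_GT;
// so the unordered form additionally accepts the "unordered" CC that
// floating-point compares can set.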
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());

  int64_t Value = ConstOp1->getSExtValue();

  if (!C.Op0.hasOneUse() ||

  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())

  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);

  int64_t SignedValue = ConstOp1->getSExtValue();

  } else if (NumBits == 8) {

      Load->getExtensionType() != ExtType) {

                               Load->getBasePtr(), Load->getPointerInfo(),
                               Load->getMemoryVT(), Load->getAlign(),
                               Load->getMemOperand()->getFlags());

      Value != ConstOp1->getZExtValue())

  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());

  switch (Load->getExtensionType()) {

  if (isa<ConstantFPSDNode>(C.Op1))

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)

  unsigned Opcode0 = C.Op0.getOpcode();

      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)

      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {

  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {

      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {

      cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {

      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&

      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedSize() <=
        C.Op0.getValueSizeInBits().getFixedSize()) {
      unsigned Type = L->getExtensionType();

      C.Op0 = C.Op0.getOperand(0);

  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));

  if (Amount >= N.getValueSizeInBits())

                                  unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {

  if (EffectivelyUnsigned && CmpVal < Low) {

  if (CmpVal == Mask) {

  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {

  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {

  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {

  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
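// These range checks drive the TEST UNDER MASK conversion: with Low and High
// presumably the lowest and highest set bits of the mask, they decide whether
// comparing (Op0 & Mask) against CmpVal can be answered purely from the
// "all masked bits zero / mixed / all masked bits one" condition codes that
// TM-style instructions produce.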
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);

  uint64_t CmpVal = ConstOp1->getZExtValue();

    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);

    MaskVal = Mask->getZExtValue();

  if (NewC.Op0.getValueType() != MVT::i64 ||

    MaskVal = -(CmpVal & -CmpVal);

  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;

      NewC.Op0.getOpcode() == ISD::SHL &&

      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&

                                        MaskVal >> ShiftVal,

    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;

             NewC.Op0.getOpcode() == ISD::SRL &&

             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&

                                               MaskVal << ShiftVal,

    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;

  if (Mask && Mask->getZExtValue() == MaskVal)

  C.CCMask = NewCCMask;

  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));

  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())

  C.Op0 = C.Op0.getOperand(0);

  C.CCValid = CCValid;

    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;

    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;

    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;

    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;

    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;

    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;

  C.CCMask &= CCValid;
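// A CC mask here is a 4-bit value in which bit (3 - CC) stands for condition
// code CC.  An equality test against a known CC therefore selects the single
// bit 1 << (3 - CC), inequality its complement, and the ordered forms select
// a contiguous run of bits; the final &= CCValid discards condition codes the
// producing instruction can never set.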
                         bool IsSignaling = false) {

  unsigned Opcode, CCValid;

  Comparison C(CmpOp0, CmpOp1, Chain);

  if (C.Op0.getValueType().isFloatingPoint()) {

    else if (!IsSignaling)

  if (!C.Op1.getNode()) {

    switch (C.Op0.getOpcode()) {

      return SDValue(Node, Node->getNumValues() - 1);

  return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);

                       unsigned CCValid, unsigned CCMask) {

  int Mask[] = { Start, -1, Start + 1, -1 };

  SDValue Ops[2] = { Res, NewChain };

    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);

  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);

                                            bool IsSignaling) const {

  assert(!IsSignaling || Chain);

  bool Invert = false;

    assert(IsFP && "Unexpected integer comparison");

                          DL, VT, CmpOp1, CmpOp0, Chain);

                          DL, VT, CmpOp0, CmpOp1, Chain);

                          LT.getValue(1), GE.getValue(1));

    assert(IsFP && "Unexpected integer comparison");

                          DL, VT, CmpOp1, CmpOp0, Chain);

                          DL, VT, CmpOp0, CmpOp1, Chain);

                          LT.getValue(1), GT.getValue(1));

      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);

      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);

    Chain = Cmp.getValue(1);

  if (Chain && Chain.getNode() != Cmp.getNode()) {

  EVT VT = Op.getValueType();

    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

                                                  bool IsSignaling) const {

  EVT VT = Op.getNode()->getValueType(0);

    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

      cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
  SDValue Ops[] = {TrueOp, FalseOp,

  int64_t Offset = Node->getOffset();

  if (Offset != 0 && (Offset & 1) == 0) {

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);

  Ops.push_back(Chain);

                  Node->getValueType(0),

  Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
  Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));

  assert(Mask && "Missing call preserved mask for calling convention");

  Ops.push_back(Glue);

  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,

  SDValue TP = lowerThreadPointer(DL, DAG);

  int64_t Offset = Node->getOffset();

  if (CP->isMachineConstantPoolEntry())

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  if (auto *LoadN = dyn_cast<LoadSDNode>(In))

                       LoadN->getBasePtr(), LoadN->getMemOperand());

    return lowerVASTART_XPLINK(Op, DAG);

  return lowerVASTART_ELF(Op, DAG);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const unsigned NumFields = 4;

  for (unsigned I = 0; I < NumFields; ++I) {

    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,

  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

                       Align(8), false, false,

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,

    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);

  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);
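// The lowering hooks in this area fan out by ABI: the *_XPLINK variants
// implement the z/OS XPLINK64 conventions and the *_ELF variants the Linux on
// Z ABI, as seen for lowerVASTART and lowerDYNAMIC_STACKALLOC above.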
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,

      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);

  if (ExtraAlignSpace)

  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();

  Register SPReg = Regs.getStackPointerRegister();

  if (ExtraAlignSpace) {

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,

      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);

  if (ExtraAlignSpace)

    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(

  EVT VT = Op.getValueType();

                     Op.getOperand(1), Ops[1], Ops[0]);

                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

                     LL, RL, Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

                     Op.getOperand(1), Ops[1], Ops[0]);

                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

  EVT VT = Op.getValueType();

                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};

  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)

  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)

  int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  MVT VT = N->getSimpleValueType(0);

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  EVT VT = Op.getValueType();