24#include "llvm/IR/IntrinsicsS390.h"
32#define DEBUG_TYPE "systemz-lower"
38 : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
39 Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
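// A Comparison starts with no opcode or CC information; the getCmp() and
// adjust* helpers later in this file fill those fields in. The constructor
// code that follows is feature-gated: operation actions and register classes
// are registered only when the subtarget provides the matching facility.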
  if (Subtarget.hasHighWord())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
  if (Subtarget.hasPopulationCount())
  if (!Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (!Subtarget.hasFPExtension()) {
  if (Subtarget.hasMiscellaneousExtensions3()) {
  if (VT != MVT::v2i64)
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements2()) {
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;

  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1()) {

  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {

  if (!Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVectorEnhancements1()) {
  if (!Subtarget.hasVector()) {
struct RTLibCallMapping {

static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"

  for (auto &E : RTLibCallCommon)

  return Subtarget.hasSoftFloat();

  return Subtarget.hasVectorEnhancements1();
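// On z/OS (XPLINK), runtime library calls use different names: each
// HANDLE_LIBCALL entry from ZOSLibcallNames.def pairs an RTLIB enum value
// with its z/OS name, and the loop above registers the whole table.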
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))

  if (SplatBitSize > 64)

  if (isInt<16>(SignedValue)) {

  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
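  // Bits that are undef in the splat may be chosen freely. The code below
  // picks values for the trailing, leading, and middle runs of undef bits
  // so the constant can still be generated with a single instruction.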
  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);

  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);

  unsigned HalfSize = Width / 2;
  if (HighValue != LowValue || 8 > HalfSize)
  SplatBits = HighValue;
  SplatBitSize = Width;

  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                                           bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())

  if (Subtarget.hasInterlockedAccess1() &&

  return isInt<32>(Imm) || isUInt<32>(Imm);

  return isUInt<32>(Imm) || isUInt<32>(-Imm);

      LongDisplacement(LongDispl), IndexReg(IdxReg) {}
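// The checks that follow (supportedAddressingMode()) look for uses that are
// likely to be selected to an instruction with only a 12-bit unsigned
// displacement and no index register, such as memory-to-memory moves and
// compares of a loaded byte/halfword against a small immediate.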
  switch (II->getIntrinsicID()) {
  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType()
                                          : I->getOperand(0)->getType());
    bool IsVectorAccess = MemAccessTy->isVectorTy();

    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;

    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;

    if (IsFPAccess || IsVectorAccess)

  bool RequireD12 = Subtarget.hasVector() && Ty->isVectorTy();

  return AM.Scale == 0;
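// Don't expand small memcpy/memset operations into scalar loads and stores:
// lengths up to MVCFastLen are better handled by the target's single
// MVC/XC-style expansion, so the generic memop-type search is skipped.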
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())

                                                  SrcAS, FuncAttributes);

  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
  return FromBits > ToBits;

  return FromBits > ToBits;

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {
                                                  const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {

    if (Subtarget.hasVector())

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
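// The immediate constraints above follow the GCC SystemZ letters:
// 'I' unsigned 8-bit, 'J' unsigned 12-bit, 'K' signed 16-bit,
// 'L' signed 20-bit, and 'M' the single value 0x7fffffff.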
static std::pair<unsigned, const TargetRegisterClass *>
                 const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end() - 1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

      return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

      if (Subtarget.hasVector()) {
        return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
  if (Constraint.size() > 0 && Constraint[0] == '{') {
    auto getVTSizeInBits = [&VT]() {

    if (Constraint[1] == 'r') {
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'f') {
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 64)
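// Explicit register names ("{r5}", "{f0}", "{v24}") are resolved through the
// parse helper above, picking the register class that matches both the
// letter and the size of the value type being constrained.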
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                                              Op.getValueType()));
#include "SystemZGenCallingConv.inc"

  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,

                                           Type *ToType) const {

  for (unsigned i = 0; i < Ins.size(); ++i)

  for (unsigned i = 0; i < Outs.size(); ++i)
  if (BitCastToType == MVT::v2i64)

                           MVT::Untyped, Hi, Lo);

    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 &&
      PartVT == MVT::Untyped) {

    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 &&
      PartVT == MVT::Untyped) {
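// i128 values travel through calls as a single Untyped register pair: the
// split hook packs the two 64-bit halves, and the join hook extracts them
// again on the other side.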
  if (Subtarget.hasVector())

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,

      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert(Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;

    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();

    int64_t RegSaveOffset =
                                 &SystemZ::FP64BitRegClass);

    MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
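// For vararg functions the unused fixed registers are spilled into the
// register save area so va_arg can find them; on XPLINK the ADA register is
// additionally recorded as live-in.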
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
                          unsigned Offset, bool LoadAdr = false) {

  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);

  unsigned ADADelta = 0;
  unsigned EPADelta = 8;

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  if (Subtarget.hasVector()) {

  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    unsigned ArgIndex = Outs[I].OrigArgIndex;
    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
        SlotVT = Outs[I].ArgVT;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();

      assert(Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
                                          SlotVT.getStoreSize()) &&
               "Not enough space for argument part!");

      ArgValue = SpillSlot;

  if (!StackPtr.getNode())

      RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));

  if (!MemOpChains.empty())
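// An argument that was split into several parts is reassembled in a stack
// spill slot: each extra part is stored at its PartOffset, and the slot
// address is passed to the callee in place of the value itself.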
                       ->getAddressOfCalleeRegister();
    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));

  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);

  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                                          bool DoesNotReturn,
                                          bool IsReturnValueUsed) const {
  Args.reserve(Ops.size());

  Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
  Args.push_back(Entry);

  if (Subtarget.hasVector())

  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)

  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
  if (Subtarget.hasVector())

  if (RetLocs.empty())

  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {

                               unsigned &CCValid) {
  unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
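// Each CC-producing intrinsic below is mapped to its SystemZISD opcode
// together with the set of condition-code values (CCValid) the underlying
// instruction can actually produce.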
  case Intrinsic::s390_tbegin:

  case Intrinsic::s390_tbegin_nofloat:

  case Intrinsic::s390_tend:

  unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();

  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:

  case Intrinsic::s390_vtm:

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:

  case Intrinsic::s390_tdc:
  for (unsigned I = 2; I < NumOps; ++I)

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

  return Intr.getNode();

  for (unsigned I = 1; I < NumOps; ++I)

  return Intr.getNode();

  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
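// This macro maps the plain, ordered, and unordered forms of a condition to
// the same base comparison mask; the unordered form additionally accepts
// CCMASK_CMP_UO.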
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  int64_t Value = ConstOp1->getSExtValue();

  if (!C.Op0.hasOneUse() ||

  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())

  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  uint64_t Mask = (1 << NumBits) - 1;
  int64_t SignedValue = ConstOp1->getSExtValue();
  } else if (NumBits == 8) {
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlign(),
                           Load->getMemOperand()->getFlags());

  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())

  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load->getMemoryVT() == MVT::i8)
  switch (Load->getExtensionType()) {
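// The code above (adjustSubwordCmp) rewrites a comparison with an extending
// byte/halfword load so it matches the subword memory-compare instructions:
// both the load and the immediate are normalized to i32 with a consistent
// extension type.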
  if (C.Op0.getValueType() == MVT::f128)
  if (isa<ConstantFPSDNode>(C.Op1))

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))

  unsigned Opcode0 = C.Op0.getOpcode();
      cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {

    Flags.setNoSignedWrap(false);
    Flags.setNoUnsignedWrap(false);
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {

      C.Op0.getValueType() == MVT::i64 &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
        cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
        C.Op0 = C.Op0.getOperand(0);

  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
                               unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {

  if (CmpVal == Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
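// These range checks decide whether an equality or ordering test on a masked
// value can be expressed as TEST UNDER MASK, which only distinguishes
// all-zero, mixed, and all-one masked bits.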
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  uint64_t CmpVal = ConstOp1->getZExtValue();

    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    MaskVal = Mask->getZExtValue();

  if (NewC.Op0.getValueType() != MVT::i64 ||

    MaskVal = -(CmpVal & -CmpVal);

  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                               MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
      NewC.Op0.getOpcode() == ISD::SRL &&
      (MaskVal << ShiftVal != 0) &&
      ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                               MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;

  if (Mask && Mask->getZExtValue() == MaskVal)
  C.CCMask = NewCCMask;

  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
  C.Op0 = C.Op0.getOperand(0);

  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
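// When an intrinsic's CC result is compared against a constant, the integer
// condition folds directly into a CC mask over the intrinsic's CCValid bits,
// so no separate compare instruction is needed.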
                         bool IsSignaling = false) {
  unsigned Opcode, CCValid;

  Comparison C(CmpOp0, CmpOp1, Chain);
  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)

    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;

  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {
    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);

  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
                            unsigned CCValid, unsigned CCMask) {

    case CmpMode::Int:         return 0;

    case CmpMode::FP:          return 0;
    case CmpMode::StrictFP:    return 0;
    case CmpMode::SignalingFP: return 0;

  int Mask[] = { Start, -1, Start + 1, -1 };

      !Subtarget.hasVectorEnhancements1()) {
  SDValue Ops[2] = { Res, NewChain };

                                           bool IsSignaling) const {
  assert(!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;
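// Ordered and <> vector compares have no single instruction; they are built
// from two compares (LT/GE or LT/GT) ORed together, with strict-FP chains
// merged from both halves.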
    assert(IsFP && "Unexpected integer comparison");
                        DL, VT, CmpOp1, CmpOp0, Chain);
                        DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GE.getValue(1));

    assert(IsFP && "Unexpected integer comparison");
                        DL, VT, CmpOp1, CmpOp0, Chain);
                        DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GT.getValue(1));

      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
    Chain = Cmp.getValue(1);

  if (Chain && Chain.getNode() != Cmp.getNode()) {
  EVT VT = Op.getValueType();
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

                                          bool IsSignaling) const {
  EVT VT = Op.getNode()->getValueType(0);
    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));

  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

      cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));

      cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {

  SDValue Ops[] = {TrueOp, FalseOp,

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
                  Node->getValueType(0),

  assert(Mask && "Missing call preserved mask for calling convention");
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,

  SDValue TP = lowerThreadPointer(DL, DAG);

  if (CP->isMachineConstantPoolEntry())

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
  int Offset = (TFL->usePackedStack(MF) ? -2 : 14) *
  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
                       LoadN->getBasePtr(), LoadN->getMemOperand());

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    if (Subtarget.hasHighWord()) {
                                        MVT::i64, SDValue(U64, 0), In);
                         DL, MVT::f32, Out64);

  if (InVT == MVT::f32 && ResVT == MVT::i32) {
                                       MVT::f64, SDValue(U64, 0), In);
    if (Subtarget.hasHighWord())
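// An f32 lives in the high 32 bits of a 64-bit FP register, so f32<->i32
// bitcasts are routed through an i64/f64 pair, using the high-word GR32
// moves when the subtarget provides them.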
    return lowerVASTART_XPLINK(Op, DAG);
  return lowerVASTART_ELF(Op, DAG);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const unsigned NumFields = 4;

  for (unsigned I = 0; I < NumFields; ++I) {
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,

  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
                       Align(8), false, false,
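// The ELF va_list is a four-field structure (__gpr, __fpr,
// __overflow_arg_area, __reg_save_area): VASTART initializes all four
// fields, and VACOPY simply copies the 32-byte structure with memcpy.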
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);

  if (ExtraAlignSpace)

  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();

  Register SPReg = Regs.getStackPointerRegister();

  if (ExtraAlignSpace) {

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
      (RealignOpt ? cast<ConstantSDNode>(Align)->getZExtValue() : 0);
  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);

  Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),

  if (ExtraAlignSpace)

                     DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
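// Dynamic stack allocation bumps the stack pointer directly. When the
// requested alignment exceeds the default stack alignment, extra space is
// reserved so the result can be realigned; on ELF the backchain word is
// reloaded and stored back so the chain stays intact.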