#include "llvm/IR/IntrinsicsS390.h"
#define DEBUG_TYPE "systemz-lower"
cl::desc("Verify that narrow int args are properly extended per the "
: Op0(Op0In), Op1(Op1In), Chain(ChainIn),
  Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
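// Annotation (not in the excerpt): the Comparison helper carries the two
// operands and optional chain together with the eventual compare opcode,
// integer-compare type and CC info; the constructor above leaves
// Opcode/ICmpType/CCValid/CCMask zeroed until the adjust* routines,
// presumably, fill them in.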
if (Subtarget.hasHighWord())
if (Subtarget.hasVector()) {
if (Subtarget.hasVectorEnhancements1())
if (Subtarget.hasVector()) {
if (Subtarget.hasVector())
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
     I <= MVT::LAST_FP_VALUETYPE;
for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
     I <= MVT::LAST_INTEGER_VALUETYPE;
if (Subtarget.hasPopulationCount())
if (!Subtarget.hasFPExtension())
if (Subtarget.hasFPExtension())
if (Subtarget.hasFPExtension())
{MVT::i8, MVT::i16, MVT::i32}, Legal);
{MVT::i8, MVT::i16}, Legal);
if (!Subtarget.hasFPExtension()) {
if (Subtarget.hasMiscellaneousExtensions3()) {
if (VT != MVT::v2i64)
if (Subtarget.hasVectorEnhancements1())
if (Subtarget.hasVector()) {
if (Subtarget.hasVectorEnhancements2()) {
for (unsigned I = MVT::FIRST_FP_VALUETYPE;
     I <= MVT::LAST_FP_VALUETYPE;
if (Subtarget.hasFPExtension()) {
if (Subtarget.hasFPExtension()) {
if (Subtarget.hasVector()) {
if (Subtarget.hasVectorEnhancements1()) {
if (Subtarget.hasVectorEnhancements1()) {
for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                 MVT::v4f32, MVT::v2f64 }) {
if (!Subtarget.hasVectorEnhancements1()) {
if (Subtarget.hasVectorEnhancements1())
if (Subtarget.hasVectorEnhancements1()) {
if (!Subtarget.hasVector()) {
struct RTLibCallMapping {
static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
for (auto &E : RTLibCallCommon)
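// X-macro expansion: each HANDLE_LIBCALL(code, name) entry in
// ZOSLibcallNames.def becomes an {RTLIB::code, name} initializer here, so the
// table stays in sync with the .def file; the loop then registers every
// mapping for the z/OS runtime.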
return Subtarget.hasSoftFloat();
return Subtarget.hasVectorEnhancements1();
if (!Subtarget.hasVector() ||
    (isFP128 && !Subtarget.hasVectorEnhancements1()))
if (SplatBitSize > 64)
if (isInt<16>(SignedValue)) {
if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
return tryValue(SplatBitsZ | Middle);
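// Undefined splat bits are free choices: the leading/trailing undef runs
// (Upper/Lower) are tried as ones first, and setting only the undef bits in
// between (Middle) increases the chance of matching a sign-extended VECTOR
// REPLICATE IMMEDIATE value or a wraparound VECTOR GENERATE MASK.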
unsigned HalfSize = Width / 2;
if (HighValue != LowValue || 8 > HalfSize)
SplatBits = HighValue;
SplatBitSize = Width;
BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
bool ForCodeSize) const {
if (Imm.isZero() || Imm.isNegZero())
assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
Register mainDstReg = MRI.createVirtualRegister(RC);
Register restoreDstReg = MRI.createVirtualRegister(RC);
assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
const int64_t FPOffset = 0;
unsigned LabelReg = MRI.createVirtualRegister(PtrRC);
.addReg(SpecialRegs->getFramePointerRegister())
.addReg(SpecialRegs->getStackPointerRegister())
Register BCReg = MRI.createVirtualRegister(PtrRC);
.addReg(SpecialRegs->getStackPointerRegister())
.addImm(TFL->getBackchainOffset(*MF))
MIB = BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::EH_SjLj_Setup))
MI.eraseFromParent();
assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
const int64_t FPOffset = 0;
SpecialRegs->getFramePointerRegister())
SpecialRegs->getStackPointerRegister())
.addReg(SpecialRegs->getStackPointerRegister())
.addImm(TFL->getBackchainOffset(*MF))
MI.eraseFromParent();
if (Subtarget.hasInterlockedAccess1() &&
return isInt<32>(Imm) || isUInt<32>(Imm);
return isUInt<32>(Imm) || isUInt<32>(-Imm);
LongDisplacement(LongDispl), IndexReg(IdxReg) {}
switch (II->getIntrinsicID()) {
case Intrinsic::memset:
case Intrinsic::memmove:
case Intrinsic::memcpy:
if (isa<LoadInst>(I) && I->hasOneUse()) {
auto *SingleUser = cast<Instruction>(*I->user_begin());
if (SingleUser->getParent() == I->getParent()) {
if (isa<ICmpInst>(SingleUser)) {
if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
  if (C->getBitWidth() <= 64 &&
      (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
} else if (isa<StoreInst>(SingleUser))
} else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
  if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                     I->getOperand(0)->getType());
bool IsVectorAccess = MemAccessTy->isVectorTy();
if (!IsVectorAccess && isa<StoreInst>(I)) {
Value *DataOp = I->getOperand(0);
if (isa<ExtractElementInst>(DataOp))
  IsVectorAccess = true;
if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
User *LoadUser = *I->user_begin();
if (isa<InsertElementInst>(LoadUser))
  IsVectorAccess = true;
if (IsFPAccess || IsVectorAccess)
return AM.Scale == 0;
std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
unsigned SrcAS, const AttributeList &FuncAttributes) const {
const int MVCFastLen = 16;
if (Limit != ~unsigned(0)) {
if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
if (Op.isZeroMemset())
SrcAS, FuncAttributes);
return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
return FromBits > ToBits;
return FromBits > ToBits;
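// isTruncateFree-style check: narrowing is free on SystemZ because the low
// bits of a wider register can be used directly, so any FromBits > ToBits
// pair reports the truncation as free; widening never does.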
if (Constraint.size() == 1) {
switch (Constraint[0]) {
} else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
switch (Constraint[1]) {
const char *constraint) const {
Value *CallOperandVal = info.CallOperandVal;
if (!CallOperandVal)
switch (*constraint) {
if (Subtarget.hasVector())
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
  if (isUInt<8>(C->getZExtValue()))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
  if (isUInt<12>(C->getZExtValue()))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
  if (isInt<16>(C->getSExtValue()))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
  if (isInt<20>(C->getSExtValue()))
if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
  if (C->getZExtValue() == 0x7fffffff)
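// These ranges correspond to the SystemZ inline-asm immediate constraints:
// 'I' is an unsigned 8-bit value, 'J' unsigned 12-bit, 'K' signed 16-bit,
// 'L' a signed 20-bit displacement, and 'M' the single constant 0x7fffffff.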
static std::pair<unsigned, const TargetRegisterClass *>
const unsigned *Map, unsigned Size) {
assert(*(Constraint.end()-1) == '}' && "Missing '}'");
if (isdigit(Constraint[2])) {
return std::make_pair(Map[Index], RC);
return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
if (Constraint.size() == 1) {
switch (Constraint[0]) {
return std::make_pair(0U, &SystemZ::GR64BitRegClass);
return std::make_pair(0U, &SystemZ::GR128BitRegClass);
return std::make_pair(0U, &SystemZ::GR32BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
else if (VT == MVT::i128)
  return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
return std::make_pair(0U, &SystemZ::FP64BitRegClass);
return std::make_pair(0U, &SystemZ::FP128BitRegClass);
return std::make_pair(0U, &SystemZ::FP32BitRegClass);
if (Subtarget.hasVector()) {
return std::make_pair(0U, &SystemZ::VR32BitRegClass);
return std::make_pair(0U, &SystemZ::VR64BitRegClass);
return std::make_pair(0U, &SystemZ::VR128BitRegClass);
auto getVTSizeInBits = [&VT]() {
if (Constraint[1] == 'r') {
if (getVTSizeInBits() == 32)
if (getVTSizeInBits() == 128)
if (Constraint[1] == 'f') {
return std::make_pair(
if (getVTSizeInBits() == 32)
if (getVTSizeInBits() == 128)
if (Constraint[1] == 'v') {
if (!Subtarget.hasVector())
return std::make_pair(
if (getVTSizeInBits() == 32)
if (getVTSizeInBits() == 64)
: SystemZ::NoRegister)
Subtarget.isTargetELF() ? SystemZ::R15D : SystemZ::NoRegister)
.Default(SystemZ::NoRegister);
const Constant *PersonalityFn) const {
const Constant *PersonalityFn) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
if (auto *C = dyn_cast<ConstantSDNode>(Op))
  if (isUInt<8>(C->getZExtValue()))
Op.getValueType()));
if (auto *C = dyn_cast<ConstantSDNode>(Op))
  if (isUInt<12>(C->getZExtValue()))
Op.getValueType()));
if (auto *C = dyn_cast<ConstantSDNode>(Op))
  if (isInt<16>(C->getSExtValue()))
C->getSExtValue(), SDLoc(Op), Op.getValueType()));
if (auto *C = dyn_cast<ConstantSDNode>(Op))
  if (isInt<20>(C->getSExtValue()))
C->getSExtValue(), SDLoc(Op), Op.getValueType()));
if (auto *C = dyn_cast<ConstantSDNode>(Op))
  if (C->getZExtValue() == 0x7fffffff)
Op.getValueType()));
#include "SystemZGenCallingConv.inc"
static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
Type *ToType) const {
if (BitCastToType == MVT::v2i64)
MVT::Untyped, Hi, Lo);
unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
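// 128-bit values live in an even/odd 64-bit register pair that the DAG
// models as a single MVT::Untyped value, so the split/join paths above only
// trigger for a 128-bit ValueVT represented as exactly one Untyped part.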
unsigned NumFixedGPRs = 0;
unsigned NumFixedFPRs = 0;
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
RC = &SystemZ::GR32BitRegClass;
RC = &SystemZ::GR64BitRegClass;
RC = &SystemZ::FP32BitRegClass;
RC = &SystemZ::FP64BitRegClass;
RC = &SystemZ::FP128BitRegClass;
RC = &SystemZ::VR128BitRegClass;
ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
unsigned ArgIndex = Ins[I].OrigArgIndex;
assert(Ins[I].PartOffset == 0);
while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
unsigned PartOffset = Ins[I + 1].PartOffset;
int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();
int64_t RegSaveOffset =
&SystemZ::FP64BitRegClass);
MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
unsigned Offset, bool LoadAdr = false) {
bool LoadAddr = false;
const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
unsigned ADADelta = 0;
unsigned EPADelta = 8;
if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                   G->getGlobal()->hasPrivateLinkage());
} else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
verifyNarrowIntegerArgs_Call(Outs, &MF.getFunction(), Callee);
for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
unsigned ArgIndex = Outs[I].OrigArgIndex;
if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
SlotVT = Outs[I].VT;
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
assert(Outs[I].PartOffset == 0);
while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
SDValue PartValue = OutVals[I + 1];
unsigned PartOffset = Outs[I + 1].PartOffset;
SlotVT.getStoreSize()) && "Not enough space for argument part!");
ArgValue = SpillSlot;
if (!StackPtr.getNode())
RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
if (!MemOpChains.empty())
->getAddressOfCalleeRegister();
Callee = DAG.getRegister(CalleeReg, Callee.getValueType());
if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
} else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
} else if (IsTailCall) {
Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
RegsToPass[I].second, Glue);
for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
RegsToPass[I].second.getValueType()));
const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
VA.getLocVT(), Glue);
bool DoesNotReturn, bool IsReturnValueUsed) const {
Args.reserve(Ops.size());
Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
Entry.IsZExt = !Entry.IsSExt;
Args.push_back(Entry);
for (auto &Out : Outs)
  if (Out.ArgVT == MVT::i128)
CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
verifyNarrowIntegerArgs_Ret(Outs, &MF.getFunction());
if (RetLocs.empty())
for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
unsigned &CCValid) {
unsigned Id = Op.getConstantOperandVal(1);
case Intrinsic::s390_tbegin:
case Intrinsic::s390_tbegin_nofloat:
case Intrinsic::s390_tend:
unsigned Id = Op.getConstantOperandVal(0);
case Intrinsic::s390_vpkshs:
case Intrinsic::s390_vpksfs:
case Intrinsic::s390_vpksgs:
case Intrinsic::s390_vpklshs:
case Intrinsic::s390_vpklsfs:
case Intrinsic::s390_vpklsgs:
case Intrinsic::s390_vceqbs:
case Intrinsic::s390_vceqhs:
case Intrinsic::s390_vceqfs:
case Intrinsic::s390_vceqgs:
case Intrinsic::s390_vchbs:
case Intrinsic::s390_vchhs:
case Intrinsic::s390_vchfs:
case Intrinsic::s390_vchgs:
case Intrinsic::s390_vchlbs:
case Intrinsic::s390_vchlhs:
case Intrinsic::s390_vchlfs:
case Intrinsic::s390_vchlgs:
case Intrinsic::s390_vtm:
case Intrinsic::s390_vfaebs:
case Intrinsic::s390_vfaehs:
case Intrinsic::s390_vfaefs:
case Intrinsic::s390_vfaezbs:
case Intrinsic::s390_vfaezhs:
case Intrinsic::s390_vfaezfs:
case Intrinsic::s390_vfeebs:
case Intrinsic::s390_vfeehs:
case Intrinsic::s390_vfeefs:
case Intrinsic::s390_vfeezbs:
case Intrinsic::s390_vfeezhs:
case Intrinsic::s390_vfeezfs:
case Intrinsic::s390_vfenebs:
case Intrinsic::s390_vfenehs:
case Intrinsic::s390_vfenefs:
case Intrinsic::s390_vfenezbs:
case Intrinsic::s390_vfenezhs:
case Intrinsic::s390_vfenezfs:
case Intrinsic::s390_vistrbs:
case Intrinsic::s390_vistrhs:
case Intrinsic::s390_vistrfs:
case Intrinsic::s390_vstrcbs:
case Intrinsic::s390_vstrchs:
case Intrinsic::s390_vstrcfs:
case Intrinsic::s390_vstrczbs:
case Intrinsic::s390_vstrczhs:
case Intrinsic::s390_vstrczfs:
case Intrinsic::s390_vstrsb:
case Intrinsic::s390_vstrsh:
case Intrinsic::s390_vstrsf:
case Intrinsic::s390_vstrszb:
case Intrinsic::s390_vstrszh:
case Intrinsic::s390_vstrszf:
case Intrinsic::s390_vfcedbs:
case Intrinsic::s390_vfcesbs:
case Intrinsic::s390_vfchdbs:
case Intrinsic::s390_vfchsbs:
case Intrinsic::s390_vfchedbs:
case Intrinsic::s390_vfchesbs:
case Intrinsic::s390_vftcidb:
case Intrinsic::s390_vftcisb:
case Intrinsic::s390_tdc:
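// All of the *s-suffixed vector intrinsics above also set the condition
// code; these tables map each intrinsic ID to its SystemZISD opcode plus the
// CCValid mask describing which CC values the instruction can produce.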
for (unsigned I = 2; I < NumOps; ++I)
assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
return Intr.getNode();
for (unsigned I = 1; I < NumOps; ++I)
return Intr.getNode();
case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
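// Macro shorthand: for each relation X (EQ, LT, GT, ...) the plain and
// ordered forms map to CCMASK_CMP_X, while the unordered form additionally
// ORs in CCMASK_CMP_UO so the test also succeeds on NaN operands.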
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
int64_t Value = ConstOp1->getSExtValue();
if (!C.Op0.hasOneUse() ||
auto *Load = cast<LoadSDNode>(C.Op0);
unsigned NumBits = Load->getMemoryVT().getSizeInBits();
if ((NumBits != 8 && NumBits != 16) ||
    NumBits != Load->getMemoryVT().getStoreSizeInBits())
auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
uint64_t Mask = (1 << NumBits) - 1;
int64_t SignedValue = ConstOp1->getSExtValue();
} else if (NumBits == 8) {
if (C.Op0.getValueType() != MVT::i32 ||
    Load->getExtensionType() != ExtType) {
Load->getBasePtr(), Load->getPointerInfo(),
Load->getMemoryVT(), Load->getAlign(),
Load->getMemOperand()->getFlags());
if (C.Op1.getValueType() != MVT::i32 ||
    Value != ConstOp1->getZExtValue())
auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
if (Load->getMemoryVT() == MVT::i8)
switch (Load->getExtensionType()) {
if (C.Op0.getValueType() == MVT::i128)
if (C.Op0.getValueType() == MVT::f128)
if (isa<ConstantFPSDNode>(C.Op1))
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
if (ConstOp1 && ConstOp1->getZExtValue() == 0)
isUInt<16>(ConstOp1->getZExtValue()))
isInt<16>(ConstOp1->getSExtValue()))
unsigned Opcode0 = C.Op0.getOpcode();
C.Op0.getConstantOperandVal(1) == 0xffffffff)
((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
if (C1 && C1->isZero()) {
if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (C1 && C1->getZExtValue() == 32) {
SDValue ShlOp0 = C.Op0.getOperand(0);
cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
C.Op1->getAsZExtVal() == 0) {
auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
    C.Op0.getValueSizeInBits().getFixedValue()) {
unsigned Type = L->getExtensionType();
C.Op0 = C.Op0.getOperand(0);
auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
uint64_t Amount = Shift->getZExtValue();
if (Amount >= N.getValueSizeInBits())
unsigned ICmpType) {
assert(Mask != 0 && "ANDs with zero should have been removed by now");
if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
if (EffectivelyUnsigned && CmpVal < Low) {
if (CmpVal == Mask) {
if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
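// Mapping comparisons onto TEST UNDER MASK: Low and High are the lowest and
// highest set bits of Mask, and each window of CmpVal values above selects
// one CC mask that TM can realize for an effectively-unsigned comparison.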
if (C.Op0.getValueType() == MVT::i128) {
auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
if (Mask && Mask->getAPIntValue() == 0) {
auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
uint64_t CmpVal = ConstOp1->getZExtValue();
NewC.Op0 = C.Op0.getOperand(0);
NewC.Op1 = C.Op0.getOperand(1);
Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
MaskVal = Mask->getZExtValue();
if (NewC.Op0.getValueType() != MVT::i64 ||
MaskVal = -(CmpVal & -CmpVal);
unsigned BitSize = NewC.Op0.getValueSizeInBits();
unsigned NewCCMask, ShiftVal;
NewC.Op0.getOpcode() == ISD::SHL &&
(MaskVal >> ShiftVal != 0) &&
((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
MaskVal >> ShiftVal,
NewC.Op0 = NewC.Op0.getOperand(0);
MaskVal >>= ShiftVal;
NewC.Op0.getOpcode() == ISD::SRL &&
(MaskVal << ShiftVal != 0) &&
((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
MaskVal << ShiftVal,
NewC.Op0 = NewC.Op0.getOperand(0);
MaskVal <<= ShiftVal;
if (Mask && Mask->getZExtValue() == MaskVal)
C.CCMask = NewCCMask;
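// Note the bit trick above: (CmpVal & -CmpVal) isolates the lowest set bit
// of CmpVal, so -(CmpVal & -CmpVal) is the mask of that bit and every bit
// above it -- the implicit mask used when Op0 has no explicit AND.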
if (C.Op0.getValueType() != MVT::i128)
bool Swap = false, Invert = false;
C.CCMask ^= C.CCValid;
auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
if (!Mask || Mask->getValueSizeInBits(0) > 64)
if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
C.Op0 = C.Op0.getOperand(0);
C.CCValid = CCValid;
C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
C.CCMask &= CCValid;
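// CC masks are 4-bit sets indexed by condition code 0-3, where bit (3 - CC)
// means "CC equals that value": 1 << (3 - CC) tests CC == N,
// ~0U << (4 - CC) tests CC < N, and the complements give the inverted
// conditions; the final &= CCValid drops CC values the producer cannot set.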
bool IsSignaling = false) {
unsigned Opcode, CCValid;
Comparison C(CmpOp0, CmpOp1, Chain);
if (C.Op0.getValueType().isFloatingPoint()) {
else if (!IsSignaling)
C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
if (!C.Op1.getNode()) {
switch (C.Op0.getOpcode()) {
return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
unsigned CCValid, unsigned CCMask) {
case CmpMode::Int: return 0;
case CmpMode::FP: return 0;
case CmpMode::StrictFP: return 0;
case CmpMode::SignalingFP: return 0;
int Mask[] = { Start, -1, Start + 1, -1 };
!Subtarget.hasVectorEnhancements1()) {
SDValue Ops[2] = { Res, NewChain };
return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
bool IsSignaling) const {
assert(!IsSignaling || Chain);
CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
               Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
bool Invert = false;
assert(IsFP && "Unexpected integer comparison");
DL, VT, CmpOp1, CmpOp0, Chain);
DL, VT, CmpOp0, CmpOp1, Chain);
LT.getValue(1), GE.getValue(1));
assert(IsFP && "Unexpected integer comparison");
DL, VT, CmpOp1, CmpOp0, Chain);
DL, VT, CmpOp0, CmpOp1, Chain);
LT.getValue(1), GT.getValue(1));
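// Orderedness (SETO/SETUO) has no single vector compare, so it is built by
// ORing the swapped-operand LT with GE; likewise ONE/UEQ uses LT with GT.
// With a strict chain, the chains of both sub-compares (getValue(1)) are
// merged again afterwards.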
Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
Chain = Cmp.getValue(1);
if (Chain && Chain.getNode() != Cmp.getNode()) {
EVT VT = Op.getValueType();
return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
bool IsSignaling) const {
EVT VT = Op.getNode()->getValueType(0);
SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                               Chain, IsSignaling);
cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
C.Op1->getAsZExtVal() == 0) {
SDValue Ops[] = {TrueOp, FalseOp,
Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
Node->getValueType(0),
assert(Mask && "Missing call preserved mask for calling convention");
Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
SDValue TP = lowerThreadPointer(DL, DAG);
if (CP->isMachineConstantPoolEntry())
unsigned Depth = Op.getConstantOperandVal(0);
int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
unsigned Depth = Op.getConstantOperandVal(0);
SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
int Offset = TFL->getReturnAddressOffset(MF);
&SystemZ::GR64BitRegClass);
EVT InVT = In.getValueType();
EVT ResVT = Op.getValueType();
if (auto *LoadN = dyn_cast<LoadSDNode>(In))
LoadN->getBasePtr(), LoadN->getMemOperand());
if (InVT == MVT::i32 && ResVT == MVT::f32) {
if (Subtarget.hasHighWord()) {
MVT::i64, SDValue(U64, 0), In);
DL, MVT::f32, Out64);
if (InVT == MVT::f32 && ResVT == MVT::i32) {
MVT::f64, SDValue(U64, 0), In);
if (Subtarget.hasHighWord())
return lowerVASTART_XPLINK(Op, DAG);
return lowerVASTART_ELF(Op, DAG);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
const unsigned NumFields = 4;
for (unsigned I = 0; I < NumFields; ++I) {
MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
Align(8), false, false,
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,
uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
if (ExtraAlignSpace)
bool IsSigned = false;
bool DoesNotReturn = false;
bool IsReturnValueUsed = false;
EVT VT = Op.getValueType();
if (ExtraAlignSpace) {
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,
uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);
uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
if (ExtraAlignSpace)
DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
if (RequiredAlign > StackAlign) {
Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
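// Alignments above the ABI stack alignment are handled by over-allocating
// ExtraAlignSpace bytes and rounding the result up, since the stack pointer
// itself only guarantees StackAlign; the backchain is then re-stored at the
// new stack pointer.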
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
EVT VT = Op.getValueType();
Op.getOperand(1), Ops[1], Ops[0]);
else if (Subtarget.hasMiscellaneousExtensions2())
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
LL, RL, Ops[1], Ops[0]);
EVT VT = Op.getValueType();
Op.getOperand(1), Ops[1], Ops[0]);
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
EVT VT = Op.getValueType();
EVT VT = Op.getValueType();
Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
if (!isInt<16>(Value))
MVT::i64, HighOp, Low32);
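// lowerOR: the known-zero masks prove one operand supplies only the high 32
// bits and the other only the low 32 bits, so the i64 OR can be emitted as
// an insertion of the low word (Low32) into the high operand instead of a
// full 64-bit OR.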
if (N->getValueType(0) == MVT::i128) {
unsigned BaseOp = 0;
unsigned FlagOp = 0;
bool IsBorrow = false;
switch (Op.getOpcode()) {
unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;
switch (Op.getOpcode()) {
if (N->getValueType(1) == MVT::i1)
MVT VT = N->getSimpleValueType(0);
if (VT == MVT::i128) {
unsigned BaseOp = 0;
unsigned FlagOp = 0;
bool IsBorrow = false;
switch (Op.getOpcode()) {
unsigned BaseOp = 0;
unsigned CCValid = 0;
unsigned CCMask = 0;
switch (Op.getOpcode()) {
if (N->getValueType(1) == MVT::i1)
EVT VT = Op.getValueType();
Op = Op.getOperand(0);
if (NumSignificantBits == 0)
BitSize = std::min(BitSize, OrigBitSize);
for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
if (BitSize != OrigBitSize)
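// CTPOP: POPCNT yields a per-byte bit count; the loop sums those counts in a
// binary tree by adding the value to a copy of itself shifted left by I
// bits, halving I each round, so the total accumulates in the high byte,
// which a final shift extracts when the operand was wider than one byte.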
auto *Node = cast<AtomicSDNode>(Op.getNode());
(Node->getMemoryVT() == MVT::i128 || Node->getMemoryVT() == MVT::f128) &&
"Only custom lowering i128 or f128.");
EVT PtrVT = Addr.getValueType();
EVT WideVT = MVT::i32;
unsigned Opcode) const {
auto *Node = cast<AtomicSDNode>(Op.getNode());
EVT NarrowVT = Node->getMemoryVT();
EVT WideVT = MVT::i32;
if (NarrowVT == WideVT)
if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
SDValue AlignedAddr, BitShift, NegBitShift;
SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
auto *Node = cast<AtomicSDNode>(Op.getNode());
EVT MemVT = Node->getMemoryVT();
if (MemVT == MVT::i32 || MemVT == MVT::i64) {
assert(Op.getValueType() == MemVT && "Mismatched VTs");
assert(Subtarget.hasInterlockedAccess1() &&
       "Should have been expanded by AtomicExpand pass.");
Node->getChain(), Node->getBasePtr(), NegSrc2,
Node->getMemOperand());
auto *Node = cast<AtomicSDNode>(Op.getNode());
if (Node->getMemoryVT() == MVT::i128) {
EVT NarrowVT = Node->getMemoryVT();
EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
if (NarrowVT == WideVT) {
SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
DL, Tys, Ops, NarrowVT, MMO);
SDValue AlignedAddr, BitShift, NegBitShift;
SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
VTList, Ops, NarrowVT, MMO);
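// Sub-word atomics are emulated on the containing aligned 32-bit word:
// AlignedAddr addresses that word while BitShift/NegBitShift rotate the 8-
// or 16-bit field into and out of position inside the compare-and-swap loop.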
SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {
if (auto *SI = dyn_cast<StoreInst>(&I))
if (auto *LI = dyn_cast<LoadInst>(&I))
if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
"in GHC calling convention");
Regs->getStackPointerRegister(), Op.getValueType());
"in GHC calling convention");
if (StoreBackchain) {
Chain, DL, Regs->getStackPointerRegister(), MVT::i64);
Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),
Chain = DAG.getCopyToReg(Chain, DL, Regs->getStackPointerRegister(), NewSP);
Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
bool IsData = Op.getConstantOperandVal(4);
return Op.getOperand(0);
bool IsWrite = Op.getConstantOperandVal(2);
auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
Node->getVTList(), Ops,
Node->getMemoryVT(), Node->getMemOperand());
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
unsigned Opcode, CCValid;
assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
unsigned Opcode, CCValid;
if (Op->getNumValues() == 1)
assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
unsigned Id = Op.getConstantOperandVal(0);
case Intrinsic::thread_pointer:
  return lowerThreadPointer(SDLoc(Op), DAG);
case Intrinsic::s390_vpdi:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vperm:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vuphb:
case Intrinsic::s390_vuphh:
case Intrinsic::s390_vuphf:
case Intrinsic::s390_vuplhb:
case Intrinsic::s390_vuplhh:
case Intrinsic::s390_vuplhf:
case Intrinsic::s390_vuplb:
case Intrinsic::s390_vuplhw:
case Intrinsic::s390_vuplf:
case Intrinsic::s390_vupllb:
case Intrinsic::s390_vupllh:
case Intrinsic::s390_vupllf:
case Intrinsic::s390_vsumb:
case Intrinsic::s390_vsumh:
case Intrinsic::s390_vsumgh:
case Intrinsic::s390_vsumgf:
case Intrinsic::s390_vsumqf:
case Intrinsic::s390_vsumqg:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vaq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vaccb:
case Intrinsic::s390_vacch:
case Intrinsic::s390_vaccf:
case Intrinsic::s390_vaccg:
case Intrinsic::s390_vaccq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vacq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vacccq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vsq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vscbib:
case Intrinsic::s390_vscbih:
case Intrinsic::s390_vscbif:
case Intrinsic::s390_vscbig:
case Intrinsic::s390_vscbiq:
  Op.getOperand(1), Op.getOperand(2));
case Intrinsic::s390_vsbiq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
case Intrinsic::s390_vsbcbiq:
  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
{ 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
{ 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
{ 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
{ 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
{ 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
{ 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
{ 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
{ 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
{ 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
{ 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
{ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
{ 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
{ 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
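// Each table entry is one fixed byte permutation of two v16i8 operands:
// indices 0-15 select bytes of operand 0 and 16-31 bytes of operand 1,
// covering the merge-high/low, pack and unpack-style patterns that a general
// shuffle can be matched against directly.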
OpNo0 = OpNo1 = OpNos[1];
} else if (OpNos[1] < 0) {
OpNo0 = OpNo1 = OpNos[0];
unsigned &OpNo0, unsigned &OpNo1) {
int OpNos[] = { -1, -1 };
if (OpNos[ModelOpNo] == 1 - RealOpNo)
OpNos[ModelOpNo] = RealOpNo;
unsigned &OpNo0, unsigned &OpNo1) {
int Elt = Bytes[From];
Transform[From] = -1;
while (P.Bytes[To] != Elt) {
Transform[From] = To;
if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I) {
int Index = VSN->getMaskElt(I);
for (unsigned J = 0; J < BytesPerElement; ++J)
  Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
Bytes.resize(NumElements * BytesPerElement, -1);
for (unsigned I = 0; I < NumElements; ++I)
  for (unsigned J = 0; J < BytesPerElement; ++J)
    Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
unsigned BytesPerElement, int &Base) {
for (unsigned I = 0; I < BytesPerElement; ++I) {
if (Bytes[Start + I] >= 0) {
unsigned Elem = Bytes[Start + I];
if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
} else if (unsigned(Base) != Elem - I)
unsigned &StartIndex, unsigned &OpNo0,
int OpNos[] = { -1, -1 };
for (unsigned I = 0; I < 16; ++I) {
int Index = Bytes[I];
Shift = ExpectedShift;
else if (Shift != ExpectedShift)
if (OpNos[ModelOpNo] == 1 - RealOpNo)
OpNos[ModelOpNo] = RealOpNo;
N = N->getOperand(0);
if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
  return Op->getZExtValue() == 0;
for (unsigned I = 0; I < Num; I++)
for (unsigned I = 0; I < 2; ++I)
unsigned StartIndex, OpNo0, OpNo1;
if (ZeroVecIdx != UINT32_MAX) {
bool MaskFirst = true;
if (OpNo == ZeroVecIdx && I == 0) {
if (OpNo != ZeroVecIdx && Byte == 0) {
if (ZeroIdx != -1) {
if (Bytes[I] >= 0) {
if (OpNo == ZeroVecIdx)
SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
(!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }
  unsigned UnpackFromEltSize;
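// GeneralShuffle accumulates a byte-level description (Bytes) of a shuffle
// over a small set of source operands; UnpackFromEltSize stays UINT_MAX
// until tryPrepareForUnpack proves the shuffle can finish with an unpack, at
// which point unpackWasPrepared() (<= 4) becomes true.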
void GeneralShuffle::addUndef() {
for (unsigned I = 0; I < BytesPerElement; ++I)
  Bytes.push_back(-1);
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
if (FromBytesPerElement < BytesPerElement)
(FromBytesPerElement - BytesPerElement));
while (Op.getNode()) {
Op = Op.getOperand(0);
} else if (Op.isUndef()) {
for (; OpNo < Ops.size(); ++OpNo)
  if (Ops[OpNo] == Op)
if (OpNo == Ops.size())
for (unsigned I = 0; I < BytesPerElement; ++I)
  Bytes.push_back(Base + I);
if (Ops.size() == 0)
tryPrepareForUnpack();
if (Ops.size() == 1)
  Ops.push_back(DAG.getUNDEF(MVT::v16i8));
unsigned Stride = 1;
for (; Stride * 2 < Ops.size(); Stride *= 2) {
for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
else if (OpNo == I + Stride)
if (NewBytes[J] >= 0) {
"Invalid double permute");
assert(NewBytesMap[J] < 0 && "Invalid double permute");
if (NewBytes[J] >= 0)
Ops[1] = Ops[Stride];
unsigned OpNo0, OpNo1;
if (unpackWasPrepared() && Ops[1].isUndef())
else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
Op = insertUnpackIfPrepared(DAG, DL, Op);
dbgs() << Msg.c_str() << " { ";
for (unsigned i = 0; i < Bytes.size(); i++)
  dbgs() << Bytes[i] << " ";
void GeneralShuffle::tryPrepareForUnpack() {
if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)
if (Ops.size() > 2 &&
UnpackFromEltSize = 1;
for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
bool MatchUnpack = true;
unsigned ToEltSize = UnpackFromEltSize * 2;
bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
if (Bytes[Elt] != -1) {
if (IsZextByte != (OpNo == ZeroVecOpNo)) {
MatchUnpack = false;
if (Ops.size() == 2) {
if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
UnpackFromEltSize = UINT_MAX;
if (UnpackFromEltSize > 4)
LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
                  << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
dumpBytes(Bytes, "Original Bytes vector:"););
Elt += UnpackFromEltSize;
for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
  Bytes[B] = Bytes[Elt];
Ops.erase(&Ops[ZeroVecOpNo]);
if (Bytes[I] >= 0) {
if (OpNo > ZeroVecOpNo)
if (!unpackWasPrepared())
unsigned InBits = UnpackFromEltSize * 8;
unsigned OutBits = InBits * 2;
if (!Op.getOperand(I).isUndef())
if (Value.isUndef())
GeneralShuffle GS(VT);
bool FoundOne = false;
for (unsigned I = 0; I < NumElements; ++I) {
Op = Op.getOperand(0);
unsigned Elem = Op.getConstantOperandVal(1);
if (!GS.add(Op.getOperand(0), Elem))
} else if (Op.isUndef()) {
if (!ResidueOps.empty()) {
while (ResidueOps.size() < NumElements)
for (auto &Op : GS.Ops) {
if (!Op.getNode()) {
return GS.getNode(DAG, SDLoc(BVN));
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
if (auto *AL = dyn_cast<AtomicSDNode>(Op))
unsigned int NumElements = Elems.size();
unsigned int Count = 0;
for (auto Elem : Elems) {
if (!Elem.isUndef()) {
else if (Elem != Single) {
if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
bool AllLoads = true;
for (auto Elem : Elems)
  if (!isVectorElementLoad(Elem)) {
if (VT == MVT::v2i64 && !AllLoads)
if (VT == MVT::v2f64 && !AllLoads)
if (VT == MVT::v4f32 && !AllLoads) {
DL, MVT::v2i64, Op01, Op23);
unsigned NumConstants = 0;
for (unsigned I = 0; I < NumElements; ++I) {
if (NumConstants > 0) {
for (unsigned I = 0; I < NumElements; ++I)
std::map<const SDNode*, unsigned> UseCounts;
SDNode *LoadMaxUses = nullptr;
for (unsigned I = 0; I < NumElements; ++I)
  if (isVectorElementLoad(Elems[I])) {
SDNode *Ld = Elems[I].getNode();
if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
if (LoadMaxUses != nullptr) {
ReplicatedVal = SDValue(LoadMaxUses, 0);
unsigned I1 = NumElements / 2 - 1;
unsigned I2 = NumElements - 1;
bool Def1 = !Elems[I1].isUndef();
bool Def2 = !Elems[I2].isUndef();
for (unsigned I = 0; I < NumElements; ++I)
  if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
EVT VT = Op.getValueType();
if (BVN->isConstant()) {
for (unsigned I = 0; I < NumElements; ++I)
  Ops[I] = Op.getOperand(I);
return buildVector(DAG, DL, VT, Ops);
auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
EVT VT = Op.getValueType();
if (VSN->isSplat()) {
unsigned Index = VSN->getSplatIndex();
"Splat index should be defined and in first operand");
GeneralShuffle GS(VT);
for (unsigned I = 0; I < NumElements; ++I) {
int Elt = VSN->getMaskElt(I);
else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                 unsigned(Elt) % NumElements))
return GS.getNode(DAG, SDLoc(VSN));
EVT VT = Op.getValueType();
if (VT == MVT::v2f64 &&
SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
EVT VT = Op.getValueType();
if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
SDValue SystemZTargetLowering::
EVT OutVT = Op.getValueType();
} while (FromBits != ToBits);
SDValue SystemZTargetLowering::
EVT OutVT = Op.getValueType();
unsigned NumInPerOut = InNumElts / OutNumElts;
unsigned ZeroVecElt = InNumElts;
for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
unsigned MaskElt = PackedElt * NumInPerOut;
unsigned End = MaskElt + NumInPerOut - 1;
for (; MaskElt < End; MaskElt++)
  Mask[MaskElt] = ZeroVecElt++;
Mask[MaskElt] = PackedElt;
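// The mask places zero-vector elements (indices >= InNumElts) in front of
// each payload element: e.g. for v4i32 -> v2i64, NumInPerOut == 2 and the
// mask becomes { 4, 0, 5, 1 }, zero-extending each selected element.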
unsigned ByScalar) const {
EVT VT = Op.getValueType();
if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
APInt SplatBits, SplatUndef;
unsigned SplatBitSize;
if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                         ElemBitSize, true) &&
    SplatBitSize == ElemBitSize) {
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
if (VSN->isSplat()) {
unsigned Index = VSN->getSplatIndex();
"Splat index should be defined and in first operand");
return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
MVT DstVT = Op.getSimpleValueType();
unsigned SrcAS = N->getSrcAddressSpace();
assert(SrcAS != N->getDestAddressSpace() &&
       "addrspacecast must be between different address spaces");
} else if (DstVT == MVT::i32) {
MVT ResultVT = Op.getSimpleValueType();
unsigned Check = Op.getConstantOperandVal(1);
unsigned TDCMask = 0;
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);
switch (Op.getOpcode()) {
return lowerFRAMEADDR(Op, DAG);
return lowerRETURNADDR(Op, DAG);
return lowerBR_CC(Op, DAG);
return lowerSELECT_CC(Op, DAG);
return lowerSETCC(Op, DAG);
return lowerSTRICT_FSETCC(Op, DAG, false);
return lowerSTRICT_FSETCC(Op, DAG, true);
return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
return lowerBITCAST(Op, DAG);
return lowerVASTART(Op, DAG);
return lowerVACOPY(Op, DAG);
return lowerDYNAMIC_STACKALLOC(Op, DAG);
return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
return lowerSMUL_LOHI(Op, DAG);
return lowerUMUL_LOHI(Op, DAG);
return lowerSDIVREM(Op, DAG);
return lowerUDIVREM(Op, DAG);
return lowerXALUO(Op, DAG);
return lowerUADDSUBO_CARRY(Op, DAG);
return lowerOR(Op, DAG);
return lowerCTPOP(Op, DAG);
return lowerVECREDUCE_ADD(Op, DAG);
return lowerATOMIC_FENCE(Op, DAG);
return lowerATOMIC_LDST_I128(Op, DAG);
return lowerATOMIC_LOAD_SUB(Op, DAG);
return lowerATOMIC_CMP_SWAP(Op, DAG);
return lowerSTACKSAVE(Op, DAG);
return lowerSTACKRESTORE(Op, DAG);
return lowerPREFETCH(Op, DAG);
return lowerINTRINSIC_W_CHAIN(Op, DAG);
return lowerINTRINSIC_WO_CHAIN(Op, DAG);
return lowerBUILD_VECTOR(Op, DAG);
return lowerVECTOR_SHUFFLE(Op, DAG);
return lowerSCALAR_TO_VECTOR(Op, DAG);
return lowerINSERT_VECTOR_ELT(Op, DAG);
return lowerEXTRACT_VECTOR_ELT(Op, DAG);
return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
return lowerIS_FPCLASS(Op, DAG);
return lowerGET_ROUNDING(Op, DAG);
return lowerREADCYCLECOUNTER(Op, DAG);
&SystemZ::FP128BitRegClass);
SystemZ::REG_SEQUENCE, SL, MVT::f128,
&SystemZ::FP128BitRegClass);
switch (N->getOpcode()) {
SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
DL, Tys, Ops, MVT::i128, MMO);
if (N->getValueType(0) == MVT::f128)
SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2) };
DL, Tys, Ops, MVT::i128, MMO);
if (cast<AtomicSDNode>(N)->getSuccessOrdering() ==
MVT::Other, Res), 0);
SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
DL, Tys, Ops, MVT::i128, MMO);
if (N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
OPCODE(ATOMIC_LOADW_ADD);
OPCODE(ATOMIC_LOADW_SUB);
OPCODE(ATOMIC_LOADW_AND);
OPCODE(ATOMIC_LOADW_XOR);
OPCODE(ATOMIC_LOADW_NAND);
OPCODE(ATOMIC_LOADW_MIN);
OPCODE(ATOMIC_LOADW_MAX);
OPCODE(ATOMIC_LOADW_UMIN);
OPCODE(ATOMIC_LOADW_UMAX);
OPCODE(ATOMIC_CMP_SWAPW);
OPCODE(ATOMIC_STORE_128);
OPCODE(ATOMIC_CMP_SWAP_128);
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
if (!Subtarget.hasVector())
DAGCombinerInfo &DCI,
unsigned Opcode = Op.getOpcode();
Op = Op.getOperand(0);
canTreatAsByteVector(Op.getValueType())) {
BytesPerElement, First))
if (Byte % BytesPerElement != 0)
Index = Byte / BytesPerElement;
canTreatAsByteVector(Op.getValueType())) {
EVT OpVT = Op.getValueType();
if (OpBytesPerElement < BytesPerElement)
unsigned End = (Index + 1) * BytesPerElement;
if (End % OpBytesPerElement != 0)
Op = Op.getOperand(End / OpBytesPerElement - 1);
if (!Op.getValueType().isInteger()) {
DCI.AddToWorklist(Op.getNode());
DCI.AddToWorklist(Op.getNode());
canTreatAsByteVector(Op.getValueType()) &&
canTreatAsByteVector(Op.getOperand(0).getValueType())) {
EVT ExtVT = Op.getValueType();
EVT OpVT = Op.getOperand(0).getValueType();
unsigned Byte = Index * BytesPerElement;
unsigned SubByte = Byte % ExtBytesPerElement;
unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
if (SubByte < MinSubByte ||
    SubByte + BytesPerElement > ExtBytesPerElement)
Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
Byte += SubByte - MinSubByte;
if (Byte % BytesPerElement != 0)
Op = Op.getOperand(0);
if (Op.getValueType() != VecVT) {
DCI.AddToWorklist(Op.getNode());
SDValue SystemZTargetLowering::combineTruncateExtract(
if (canTreatAsByteVector(VecVT)) {
if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (BytesPerElement % TruncBytes == 0) {
unsigned Scale = BytesPerElement / TruncBytes;
unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (TrueOp && FalseOp) {
DCI.CombineTo(N0.getNode(), TruncSelect);
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
EVT VT = N->getValueType(0);
auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
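// sext(sra(shl X, a)) over i32 is rewritten as one 64-bit shl/sra pair with
// both shift amounts increased by Extra (the width difference), folding the
// sign extension into the shifts.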
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {
unsigned Opcode = N->getOpcode();
if (Op1 == N->getOperand(0))
if (ElemBytes <= 4) {
DCI.AddToWorklist(Op1.getNode());
DCI.AddToWorklist(Op.getNode());
LoPart = HiPart = nullptr;
if (Use.getResNo() != 0)
bool IsLoPart = true;
LoPart = HiPart = nullptr;
if (Use.getResNo() != 0)
User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
switch (User->getConstantOperandVal(1)) {
case SystemZ::subreg_l64:
case SystemZ::subreg_h64:
SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {
EVT LdVT = N->getValueType(0);
if (auto *LN = dyn_cast<LoadSDNode>(N)) {
MVT LoadNodeVT = LN->getBasePtr().getSimpleValueType();
if (PtrVT != LoadNodeVT) {
return DAG.getExtLoad(LN->getExtensionType(), DL, LN->getValueType(0),
                      LN->getChain(), AddrSpaceCast, LN->getMemoryVT(),
                      LN->getMemOperand());
LD->getPointerInfo(), LD->getOriginalAlign(),
LD->getMemOperand()->getFlags(), LD->getAAInfo());
DCI.CombineTo(HiPart, EltLoad, true);
LD->getPointerInfo().getWithOffset(8), LD->getOriginalAlign(),
LD->getMemOperand()->getFlags(), LD->getAAInfo());
DCI.CombineTo(LoPart, EltLoad, true);
DCI.AddToWorklist(Chain.getNode());
} else if (Use.getResNo() == 0)
if (!Replicate || OtherUses.empty())
for (SDNode *U : OtherUses) {
bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
if (Subtarget.hasVectorEnhancements2())
  if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)
for (unsigned i = 0; i < NumElts; ++i) {
if (M[i] < 0) continue;
if ((unsigned) M[i] != NumElts - 1 - i)
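// Element-reversal test (undef entries ignored): index i must map to
// NumElts - 1 - i. Such shuffles can fold into the element-reversing
// VLER/VSTER loads and stores guarded by hasVectorEnhancements2() above.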
for (auto *U : StoredVal->users()) {
EVT CurrMemVT = ST->getMemoryVT().getScalarType();
} else if (isa<BuildVectorSDNode>(U)) {
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
auto *SN = cast<StoreSDNode>(N);
auto &Op1 = N->getOperand(1);
EVT MemVT = SN->getMemoryVT();
MVT StoreNodeVT = SN->getBasePtr().getSimpleValueType();
if (PtrVT != StoreNodeVT) {
return DAG.getStore(SN->getChain(), DL, SN->getValue(), AddrSpaceCast,
                    SN->getPointerInfo(), SN->getOriginalAlign(),
                    SN->getMemOperand()->getFlags(), SN->getAAInfo());
if (MemVT.isInteger() && SN->isTruncatingStore()) {
combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
DCI.AddToWorklist(Value.getNode());
SN->getBasePtr(), SN->getMemoryVT(),
SN->getMemOperand());
if (!SN->isTruncatingStore() &&
N->getOperand(0), BSwapOp, N->getOperand(2)
Ops, MemVT, SN->getMemOperand());
if (!SN->isTruncatingStore() &&
Subtarget.hasVectorEnhancements2()) {
Ops, MemVT, SN->getMemOperand());
if (!SN->isTruncatingStore() &&
N->getOperand(0).reachesChainWithoutSideEffects(SDValue(Op1.getNode(), 1))) {
Ops, MemVT, SN->getMemOperand());
DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(),
             SN->getPointerInfo(), SN->getOriginalAlign(),
             SN->getMemOperand()->getFlags(), SN->getAAInfo());
SN->getPointerInfo().getWithOffset(8),
SN->getOriginalAlign(),
SN->getMemOperand()->getFlags(), SN->getAAInfo());
if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() ||
APInt Val = C->getAPIntValue();
assert(SN->isTruncatingStore() &&
       "Non-truncating store and immediate value does not fit?");
Val = Val.trunc(TotBytes * 8);
if (VCI.isVectorConstantLegal(Subtarget) &&
auto FindReplicatedReg = [&](SDValue MulOp) {
EVT MulVT = MulOp.getValueType();
if (MulOp->getOpcode() == ISD::MUL &&
    (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
WordVT = LHS->getOperand(0).getValueType();
WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT();
if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
APInt(MulVT.getSizeInBits(), C->getZExtValue()));
if (VCI.isVectorConstantLegal(Subtarget) &&
    WordVT == VCI.VecVT.getScalarType())
if (isa<BuildVectorSDNode>(Op1) &&
if (auto *C = dyn_cast<ConstantSDNode>(SplatVal))
FindReplicatedReg(SplatVal);
if (auto *C = dyn_cast<ConstantSDNode>(Op1))
FindReplicatedReg(Op1);
"Bad type handling");
SN->getBasePtr(), SN->getMemOperand());
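// FindReplicatedReg recognizes a multiply that broadcasts a narrow value
// into every lane of a wider word (X * 0x0101...01 patterns); when the
// replicated constant is also legal as a vector splat, the store can
// presumably be fed from a vector replicate of the narrow register instead
// of the widened scalar.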
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {
N->getOperand(0).hasOneUse() &&
Subtarget.hasVectorEnhancements2()) {
Ops, LD->getMemoryVT(), LD->getMemOperand());
DCI.CombineTo(N, ESLoad);
DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {
if (!Subtarget.hasVector())
Op.getValueType().isVector() &&
Op.getOperand(0).getValueType().isVector() &&
Op.getValueType().getVectorNumElements() ==
    Op.getOperand(0).getValueType().getVectorNumElements())
Op = Op.getOperand(0);
EVT VecVT = Op.getValueType();
Op.getOperand(0), N->getOperand(1));
DCI.AddToWorklist(Op.getNode());
if (EltVT != N->getValueType(0)) {
DCI.AddToWorklist(Op.getNode());
if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
if (canTreatAsByteVector(VecVT))
  return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                        IndexN->getZExtValue(), DCI, false);
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {
if (N->getOperand(0) == N->getOperand(1))
if (Chain1 == Chain2)
SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {
if (!Subtarget.hasVector())
unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() &&
for (auto *U : Vec->users()) {
if (U != Op0.getNode() && U->hasOneUse() &&
    U->getOperand(0) == Vec &&
    U->getConstantOperandVal(1) == 1) {
if (OtherRound.getOpcode() == N->getOpcode() &&
if (N->isStrictFPOpcode()) {
{MVT::v4f32, MVT::Other}, {Chain, Vec});
DCI.AddToWorklist(VRound.getNode());
DCI.AddToWorklist(Extract1.getNode());
N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
if (!Subtarget.hasVector())
unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() &&
for (auto *U : Vec->users()) {
if (U != Op0.getNode() && U->hasOneUse() &&
    U->getOperand(0) == Vec &&
    U->getConstantOperandVal(1) == 2) {
if (OtherExtend.getOpcode() == N->getOpcode() &&
if (N->isStrictFPOpcode()) {
{MVT::v2f64, MVT::Other}, {Chain, Vec});
DCI.AddToWorklist(VExtend.getNode());
DCI.AddToWorklist(Extract1.getNode());
N->getVTList(), Extract0, Chain);
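// Both combines merge two scalar rounds/extends of elements of the same
// vector (elements 0 and 1 for FP_ROUND, 0 and 2 for FP_EXTEND, per the
// getConstantOperandVal checks) into one vector round/extend, then
// re-extract the two scalar results.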
SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  if (DCI.Level != BeforeLegalizeTypes)
    return SDValue();
  SelectionDAG &DAG = DCI.DAG;
  LLVMContext &Ctx = *DAG.getContext();
  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);
  Type *OutLLVMTy = OutVT.getTypeForEVT(Ctx);
  SDValue Op = N->getOperand(0);
  unsigned OutScalarBits = OutLLVMTy->getScalarSizeInBits();
  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();

  if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits &&
      OutScalarBits <= 64) {
    unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
    EVT ExtVT = EVT::getVectorVT(
        Ctx, EVT::getIntegerVT(Ctx, OutScalarBits), NumElts);
    unsigned ExtOpcode =
        (Opcode == ISD::UINT_TO_FP ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND);
    SDValue ExtOp = DAG.getNode(ExtOpcode, SDLoc(N), ExtVT, Op);
    return DAG.getNode(Opcode, SDLoc(N), OutVT, ExtOp);
  }
  return SDValue();
}
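// Combine BSWAP (LOAD) into a byte-swapping load (LRVH/LRV/LRVG/VLBR), and
// push a vector BSWAP into insertions and shuffles when at least one side
// then simplifies.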
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR.
  if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    SDValue Load = N->getOperand(0);
    LoadSDNode *LD = cast<LoadSDNode>(Load);

    // Create the byte-swapping load.
    SDValue Ops[] = {
        LD->getChain(),   // Chain
        LD->getBasePtr()  // Ptr
    };
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
      LoadVT = MVT::i32;
    SDValue BSLoad = DAG.getMemIntrinsicNode(
        SystemZISD::LRV, SDLoc(N), DAG.getVTList(LoadVT, MVT::Other),
        Ops, LD->getMemoryVT(), LD->getMemOperand());

    // If this is an i16 load, insert the truncate.
    SDValue ResVal = BSLoad;
    if (N->getValueType(0) == MVT::i16)
      ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);

    // First, combine the bswap away.  This makes the value produced by the
    // load dead.
    DCI.CombineTo(N, ResVal);

    // Next, combine the load away; we give it a bogus result value but a
    // real chain result.  The result value is dead because the bswap is
    // dead.
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

    // Return N so it doesn't get rechecked!
    return SDValue(N, 0);
  }

  // Look through bitcasts that retain the number of vector elements.
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST &&
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

  // Push BSWAP into a vector insertion if at least one side then simplifies.
  if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) {
    SDValue Vec = Op.getOperand(0);
    SDValue Elt = Op.getOperand(1);
    SDValue Idx = Op.getOperand(2);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) ||
        Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Elt) ||
        Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() ||
        (canLoadStoreByteSwapped(N->getValueType(0)) &&
         ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) {
      EVT VecVT = N->getValueType(0);
      EVT EltVT = N->getValueType(0).getVectorElementType();
      if (VecVT != Vec.getValueType()) {
        Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
        DCI.AddToWorklist(Vec.getNode());
      }
      if (EltVT != Elt.getValueType()) {
        Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
        DCI.AddToWorklist(Elt.getNode());
      }
      Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec);
      DCI.AddToWorklist(Vec.getNode());
      Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt);
      DCI.AddToWorklist(Elt.getNode());
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT,
                         Vec, Elt, Idx);
    }
  }

  // Push BSWAP into a vector shuffle if at least one side then simplifies.
  ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op);
  if (SV && Op.hasOneUse()) {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
        Op0.getOpcode() == ISD::BSWAP || Op0.isUndef() ||
        DAG.isConstantIntBuildVectorOrConstantInt(Op1) ||
        Op1.getOpcode() == ISD::BSWAP || Op1.isUndef()) {
      EVT VecVT = N->getValueType(0);
      if (VecVT != Op0.getValueType()) {
        Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0);
        DCI.AddToWorklist(Op0.getNode());
      }
      if (VecVT != Op1.getValueType()) {
        Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1);
        DCI.AddToWorklist(Op1.getNode());
      }
      Op0 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op0);
      DCI.AddToWorklist(Op0.getNode());
      Op1 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op1);
      DCI.AddToWorklist(Op1.getNode());
      return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask());
    }
  }

  return SDValue();
}
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
  // Verify that we have an ICMP against some constant.
  if (CCValid != SystemZ::CCMASK_ICMP)
    return false;
  auto *ICmp = CCReg.getNode();
  if (ICmp->getOpcode() != SystemZISD::ICMP)
    return false;
  auto *CompareLHS = ICmp->getOperand(0).getNode();
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  if (!CompareRHS)
    return false;

  // Verify that we have an appropriate mask for a EQ or NE comparison.
  bool Invert = false;
  if (CCMask == SystemZ::CCMASK_CMP_NE)
    Invert = !Invert;
  else if (CCMask != SystemZ::CCMASK_CMP_EQ)
    return false;

  // Optimize the case where CompareLHS is a SELECT_CCMASK.
  if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
    // Verify that we have an appropriate constant.
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    if (!TrueVal)
      return false;
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!FalseVal)
      return false;
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
      Invert = !Invert;
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
      return false;

    // Compute the effective CC mask for the new branch or select.
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
      return false;
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
    if (Invert)
      CCMask ^= CCValid;

    // Return the updated CCReg link.
    CCReg = CompareLHS->getOperand(4);
    return true;
  }

  // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
      return false;
    auto *SHL = CompareLHS->getOperand(0).getNode();
    if (SHL->getOpcode() != ISD::SHL)
      return false;
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
      return false;
    auto *IPM = SHL->getOperand(0).getNode();
    if (IPM->getOpcode() != SystemZISD::IPM)
      return false;

    // Avoid introducing CC spills (because SRA would clobber CC).
    if (!CompareLHS->hasOneUse())
      return false;
    // Verify that the ICMP compares against zero.
    if (CompareRHS->getZExtValue() != 0)
      return false;

    // Compute the effective CC mask for the new branch or select.
    // ...
    // Return the updated CCReg link.
    CCReg = IPM->getOperand(0);
    return true;
  }

  return false;
}
SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue Chain = N->getOperand(0);
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
                       Chain,
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       N->getOperand(3), CCReg);
  return SDValue();
}
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    return SDValue();

  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  SDValue CCReg = N->getOperand(4);

  if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
    return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
                       N->getOperand(0), N->getOperand(1),
                       DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
                       DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
                       CCReg);
  return SDValue();
}
SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible.
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    return SDValue();
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();

  SDValue Select = N->getOperand(0);
  if (Select->getOpcode() == ISD::TRUNCATE)
    Select = Select->getOperand(0);
  if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
    return SDValue();

  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    return SDValue();
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();

  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    return SDValue();
  if (TrueVal->getZExtValue() == 1 && FalseVal->getZExtValue() == 0)
    ;
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
    SelectCCMaskVal ^= SelectCCValidVal;
  else
    return SDValue();

  if (SelectCCValidVal & ~CCValidVal)
    return SDValue();
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    return SDValue();

  return Select->getOperand(4);
}
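// A vector division or remainder by constants is cheaper when scalarized:
// BuildSDIV/BuildUDIV only succeed when they can build a multiplication
// node, which is not legal for all of these types on SystemZ, so unroll the
// operation instead.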
SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  if (DCI.Level == BeforeLegalizeTypes && VT.isVector() && isTypeLegal(VT) &&
      DAG.isConstantIntBuildVectorOrConstantInt(N->getOperand(1)))
    return DAG.UnrollVectorOp(N);
  return SDValue();
}
SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  unsigned Id = N->getConstantOperandVal(1);
  switch (Id) {
  // VECTOR LOAD (RIGHTMOST) WITH LENGTH with a length operand of 15
  // or larger is simply a vector load.
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
        return DAG.getLoad(N->getValueType(0), SDLoc(N), N->getOperand(0),
                           N->getOperand(3), MachinePointerInfo());
    break;
  // Likewise for VECTOR STORE (RIGHTMOST) WITH LENGTH.
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
        return DAG.getStore(N->getOperand(0), SDLoc(N), N->getOperand(2),
                            N->getOperand(4), MachinePointerInfo());
    break;
  }

  return SDValue();
}
SDValue SystemZTargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == SystemZISD::PCREL_WRAPPER)
    return N->getOperand(0);
  return N;
}

SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  switch (N->getOpcode()) {
  // ...
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:           return combineIntDIVREM(N, DCI);
  // ...
  }

  return SDValue();
}
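// For an operand OpNo of Op, return the demanded elements of the
// corresponding source operand, given the demanded elements of the result.
// Used by the KnownBits and NumSignBits hooks below for the vector
// pack/unpack/permute intrinsics.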
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts,
                                    unsigned OpNo) {
  EVT VT = Op.getValueType();
  unsigned NumElts = (VT.isVector() ? VT.getVectorNumElements() : 1);
  APInt SrcDemE;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = Op.getConstantOperandVal(0);
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
      // VECTOR PACK truncates the elements of two source vectors into one.
      SrcDemE = DemandedElts;
      if (OpNo == 2)
        SrcDemE.lshrInPlace(NumElts / 2);
      SrcDemE = SrcDemE.trunc(NumElts / 2);
      break;
      // VECTOR UNPACK extends half the elements of the source vector.
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, 0);
      break;
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      SrcDemE = APInt(NumElts * 2, 0);
      SrcDemE.insertBits(DemandedElts, NumElts);
      break;
    case Intrinsic::s390_vpdi: {
      // VECTOR PERMUTE DWORD IMMEDIATE selects one element from each source.
      SrcDemE = APInt(NumElts, 0);
      if (!DemandedElts[OpNo - 1])
        break;
      unsigned Mask = Op.getConstantOperandVal(3);
      unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
      // Demand input element 0 or 1, given by the mask bit value.
      SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
      break;
    }
    case Intrinsic::s390_vsldb: {
      // VECTOR SHIFT LEFT DOUBLE BY BYTE
      assert(VT == MVT::v16i8 && "Unexpected type.");
      unsigned FirstIdx = Op.getConstantOperandVal(3);
      assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
      unsigned NumSrc0Els = 16 - FirstIdx;
      SrcDemE = APInt(NumElts, 0);
      if (OpNo == 1) {
        APInt DemEls = DemandedElts.trunc(NumSrc0Els);
        SrcDemE.insertBits(DemEls, FirstIdx);
      } else {
        APInt DemEls = DemandedElts.lshr(NumSrc0Els);
        SrcDemE.insertBits(DemEls, 0);
      }
      break;
    }
    case Intrinsic::s390_vperm:
      SrcDemE = APInt(NumElts, -1);
      break;
    default:
      llvm_unreachable("Unhandled intrinsic.");
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::JOIN_DWORDS:
      // Scalar operand.
      SrcDemE = APInt(1, 1);
      break;
    case SystemZISD::SELECT_CCMASK:
      SrcDemE = DemandedElts;
      break;
    default:
      llvm_unreachable("Unhandled opcode.");
      break;
    }
  }
  return SrcDemE;
}
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known,
                                  const APInt &DemandedElts,
                                  const SelectionDAG &DAG, unsigned Depth,
                                  unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  KnownBits LHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo), Src0DemE, Depth + 1);
  KnownBits RHSKnown =
      DAG.computeKnownBits(Op.getOperand(OpNo + 1), Src1DemE, Depth + 1);
  Known = LHSKnown.intersectWith(RHSKnown);
}

void
SystemZTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                     KnownBits &Known,
                                                     const APInt &DemandedElts,
                                                     const SelectionDAG &DAG,
                                                     unsigned Depth) const {
  Known.resetAll();

  // Intrinsic CC result is returned in the two low bits.
  unsigned tmp0, tmp1; // not used
  if (Op.getResNo() == 1 && isIntrinsicWithCC(Op, tmp0, tmp1)) {
    Known.Zero.setBitsFrom(2);
    return;
  }
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    return;
  assert(Known.getBitWidth() == VT.getScalarSizeInBits() &&
         "KnownBits does not match VT in bitwidth");
  assert((!VT.isVector() ||
          DemandedElts.getBitWidth() == VT.getVectorNumElements()) &&
         "DemandedElts does not match VT number of elements");
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_W_CHAIN || Opcode == ISD::INTRINSIC_WO_CHAIN) {
    bool IsLogical = false;
    unsigned Id = Op.getConstantOperandVal(0);
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      computeKnownBitsBinOp(Op, Known, DemandedElts, DAG, Depth, 1);
      break;
    case Intrinsic::s390_vuplhb: // VECTOR UNPACK LOGICAL HIGH
    case Intrinsic::s390_vuplhh:
    case Intrinsic::s390_vuplhf:
    case Intrinsic::s390_vupllb: // VECTOR UNPACK LOGICAL LOW
    case Intrinsic::s390_vupllh:
    case Intrinsic::s390_vupllf:
      IsLogical = true;
      [[fallthrough]];
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue SrcOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 0);
      Known = DAG.computeKnownBits(SrcOp, SrcDemE, Depth + 1);
      if (IsLogical) {
        Known = Known.zext(BitWidth);
      } else
        Known = Known.sext(BitWidth);
      break;
    }
    default:
      break;
    }
  } else {
    // ...  (handle SystemZISD nodes such as JOIN_DWORDS and SELECT_CCMASK)
  }
}
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts,
                                        const SelectionDAG &DAG, unsigned Depth,
                                        unsigned OpNo) {
  APInt Src0DemE = getDemandedSrcElements(Op, DemandedElts, OpNo);
  unsigned LHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo), Src0DemE,
                                        Depth + 1);
  if (LHS == 1)
    return 1; // Early out.
  APInt Src1DemE = getDemandedSrcElements(Op, DemandedElts, OpNo + 1);
  unsigned RHS = DAG.ComputeNumSignBits(Op.getOperand(OpNo + 1), Src1DemE,
                                        Depth + 1);
  if (RHS == 1)
    return 1; // Early out.
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getScalarSizeInBits();
  if (SrcBitWidth > VTBits) { // PACK
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    return 1;
  }
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
  return Common;
}
unsigned
SystemZTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  if (Op.getResNo() != 0)
    return 1;
  unsigned Opcode = Op.getOpcode();
  if (Opcode == ISD::INTRINSIC_WO_CHAIN) {
    unsigned Id = Op.getConstantOperandVal(0);
    switch (Id) {
    case Intrinsic::s390_vpksh:   // PACKS
    case Intrinsic::s390_vpksf:
    case Intrinsic::s390_vpksg:
    case Intrinsic::s390_vpkshs:  // PACKS_CC
    case Intrinsic::s390_vpksfs:
    case Intrinsic::s390_vpksgs:
    case Intrinsic::s390_vpklsh:  // PACKLS
    case Intrinsic::s390_vpklsf:
    case Intrinsic::s390_vpklsg:
    case Intrinsic::s390_vpklshs: // PACKLS_CC
    case Intrinsic::s390_vpklsfs:
    case Intrinsic::s390_vpklsgs:
    case Intrinsic::s390_vpdi:
    case Intrinsic::s390_vsldb:
    case Intrinsic::s390_vperm:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 1);
    case Intrinsic::s390_vuphb:  // VECTOR UNPACK HIGH
    case Intrinsic::s390_vuphh:
    case Intrinsic::s390_vuphf:
    case Intrinsic::s390_vuplb:  // VECTOR UNPACK LOW
    case Intrinsic::s390_vuplhw:
    case Intrinsic::s390_vuplf: {
      SDValue PackedOp = Op.getOperand(1);
      APInt SrcDemE = getDemandedSrcElements(Op, DemandedElts, 1);
      unsigned Tmp = DAG.ComputeNumSignBits(PackedOp, SrcDemE, Depth + 1);
      EVT VT = Op.getValueType();
      unsigned VTBits = VT.getScalarSizeInBits();
      Tmp += VTBits - PackedOp.getScalarValueSizeInBits();
      return Tmp;
    }
    default:
      break;
    }
  } else {
    switch (Opcode) {
    case SystemZISD::SELECT_CCMASK:
      return computeNumSignBitsBinOp(Op, DemandedElts, DAG, Depth, 0);
    default:
      break;
    }
  }

  return 1;
}
bool SystemZTargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    bool PoisonOnly, unsigned Depth) const {
  switch (Op->getOpcode()) {
  case SystemZISD::PCREL_WRAPPER:
  case SystemZISD::PCREL_OFFSET:
    return true;
  }
  return false;
}

unsigned
SystemZTargetLowering::getStackProbeSize(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  unsigned StackAlign = TFI->getStackAlignment();
  assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
         "Unexpected stack alignment");
  // The default stack probe size is 4096 if the function has no
  // stack-probe-size attribute.
  unsigned StackProbeSize =
      MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size", 4096);
  // Round down to the stack alignment.
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
}
// Force base value Base into a register before MI.  Return the register.
static Register forceReg(MachineInstr &MI, MachineOperand &Base,
                         const SystemZInstrInfo *TII) {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (Base.isReg()) {
    // Copy Base into a new virtual register to help register coalescing in
    // cases with multiple uses.
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::COPY), Reg)
        .add(Base);
    return Reg;
  }

  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
      .add(Base)
      .addImm(0)
      .addReg(0);
  return Reg;
}

// Return true if CC is not live after MI: no later instruction in the block
// reads it, and it is not live into any successor.
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB) {
  // ...  (scan forward through MBB for a use or def of CC)
  // If we hit the end of the block, check whether CC is live into a
  // successor.
  for (MachineBasicBlock *Succ : MBB->successors())
    if (Succ->isLiveIn(SystemZ::CC))
      return false;

  return true;
}
// Return true if MI is one of the Select pseudo-instructions.
static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return true;

  default:
    return false;
  }
}
// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ]
// where %FalseValue(i) and %TrueValue(i) are taken from Selects.
static void createPHIsForSelects(SmallVector<MachineInstr*, 8> &Selects,
                                 MachineBasicBlock *TrueMBB,
                                 MachineBasicBlock *FalseMBB,
                                 MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineInstr *FirstMI = Selects.front();
  unsigned CCValid = FirstMI->getOperand(3).getImm();
  unsigned CCMask = FirstMI->getOperand(4).getImm();

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one.  Later Selects may reference the results of earlier Selects, but
  // later PHIs have to reference the individual true/false inputs from
  // earlier PHIs.  That also means that PHI construction must work forward
  // from earlier to later, and that the code must maintain a mapping from
  // earlier PHI's destination registers to the registers that went into the
  // PHI.
  DenseMap<Register, std::pair<Register, Register>> RegRewriteTable;

  for (auto *MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();

    // If this Select we are generating is the opposite condition from the
    // jump we generated, then we have to swap the operands for the PHI that
    // is going to be generated.
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
      std::swap(TrueReg, FalseReg);

    if (auto It = RegRewriteTable.find(TrueReg); It != RegRewriteTable.end())
      TrueReg = It->second.first;

    if (auto It = RegRewriteTable.find(FalseReg); It != RegRewriteTable.end())
      FalseReg = It->second.second;

    DebugLoc DL = MI->getDebugLoc();
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
        .addReg(TrueReg).addMBB(TrueMBB)
        .addReg(FalseReg).addMBB(FalseMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  }

  MF->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
}
MachineBasicBlock *
SystemZTargetLowering::emitAdjCallStack(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {
  MachineFunction &MF = *BB->getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
  assert(TFL->hasReservedCallFrame(MF) &&
         "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
  (void)TFL;
  // Get the MaxCallFrameSize value and erase MI since it serves no further
  // purpose as the call frame is statically reserved in the prolog.  Set
  // AdjustsStack as MI is *not* mapped as a frame instruction.
  uint32_t NumBytes = MI.getOperand(0).getImm();
  if (NumBytes > MFI.getMaxCallFrameSize())
    MFI.setMaxCallFrameSize(NumBytes);
  MFI.setAdjustsStack(true);
  MI.eraseFromParent();
  return BB;
}
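// Expand a Select* pseudo into a conditional branch diamond.  A run of
// selects that test the same condition is expanded into one pair of blocks,
// with a PHI per select in the join block (createPHIsForSelects above).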
MachineBasicBlock *
SystemZTargetLowering::emitSelect(MachineInstr &MI,
                                  MachineBasicBlock *MBB) const {
  assert(isSelectPseudo(MI) && "Bad call to emitSelect()");
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();

  // If we have a sequence of Select* pseudo instructions using the
  // same condition code value, we want to expand all of them into
  // a single pair of basic blocks using the same condition.
  SmallVector<MachineInstr*, 8> Selects;
  SmallVector<MachineInstr*, 8> DbgValues;
  Selects.push_back(&MI);
  unsigned Count = 0;
  for (MachineInstr &NextMI : llvm::make_range(
           std::next(MachineBasicBlock::iterator(MI)), MBB->end())) {
    if (isSelectPseudo(NextMI)) {
      assert(NextMI.getOperand(3).getImm() == CCValid &&
             "Bad CCValid operands since CC was not redefined.");
      if (NextMI.getOperand(4).getImm() == CCMask ||
          NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
        Selects.push_back(&NextMI);
        continue;
      }
      break;
    }
    if (NextMI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) ||
        NextMI.usesCustomInsertionHook())
      break;
    bool User = false;
    for (auto *SelMI : Selects)
      if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
        User = true;
        break;
      }
    if (NextMI.isDebugInstr()) {
      if (User) {
        assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
        DbgValues.push_back(&NextMI);
      }
    } else if (User || ++Count > 20)
      break;
  }

  MachineInstr *LastMI = Selects.back();
  bool CCKilled = (LastMI->killsRegister(SystemZ::CC, /*TRI=*/nullptr) ||
                   checkCCKill(*LastMI, MBB));
  // ...  (split the block into StartMBB / FalseMBB / JoinMBB, mark CC
  //       live-in where needed, and emit the BRC on CCValid/CCMask)
  MBB = JoinMBB;
  createPHIsForSelects(Selects, StartMBB, FalseMBB, MBB);
  for (auto *SelMI : Selects)
    SelMI->eraseFromParent();

  MachineBasicBlock::iterator InsertPos = MBB->getFirstNonPHI();
  for (auto *DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);

  return JoinMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo CondStore* instruction MI.
// StoreOpcode is the store to use and Invert says whether the store should
// happen when the condition is false rather than true.  If a STORE ON
// CONDITION is available, STOCOpcode is its opcode, otherwise it is 0.
MachineBasicBlock *
SystemZTargetLowering::emitCondStore(MachineInstr &MI,
                                     MachineBasicBlock *MBB,
                                     unsigned StoreOpcode,
                                     unsigned STOCOpcode,
                                     bool Invert) const {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  Register SrcReg = MI.getOperand(0).getReg();
  MachineOperand Base = MI.getOperand(1);
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  DebugLoc DL = MI.getDebugLoc();

  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);

  // ISel pattern matching also adds a load memory operand of the same
  // address, so take special care to find the storing memory operand.
  MachineMemOperand *MMO = nullptr;
  for (auto *I : MI.memoperands())
    if (I->isStore()) {
      MMO = I;
      break;
    }

  // Use STOCOpcode if possible.  We could use different store patterns in
  // order to avoid matching the index register, but the performance
  // trade-offs might be more complicated in that case.
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    if (Invert)
      CCMask ^= CCValid;

    BuildMI(*MBB, MI, DL, TII->get(STOCOpcode))
        .addReg(SrcReg)
        .add(Base)
        .addImm(Disp)
        .addImm(CCValid)
        .addImm(CCMask)
        .addMemOperand(MMO);

    MI.eraseFromParent();
    return MBB;
  }

  // Get the condition needed to branch around the store.
  if (!Invert)
    CCMask ^= CCValid;

  MachineBasicBlock *StartMBB = MBB;
  MachineBasicBlock *JoinMBB  = splitBlockBefore(MI, MBB);
  MachineBasicBlock *FalseMBB = emitBlockAfter(StartMBB);

  // Unless CC was killed in the CondStore instruction, mark it as
  // live-in to both FalseMBB and JoinMBB.
  if (!MI.killsRegister(SystemZ::CC, /*TRI=*/nullptr) &&
      !checkCCKill(MI, JoinMBB)) {
    FalseMBB->addLiveIn(SystemZ::CC);
    JoinMBB->addLiveIn(SystemZ::CC);
  }

  // ...  (branch around FalseMBB and emit the plain store there)
  MI.eraseFromParent();
  return JoinMBB;
}
// Implement EmitInstrWithCustomInserter for pseudo [SU]Cmp128Hi instruction
// MI.
MachineBasicBlock *
SystemZTargetLowering::emitICmp128Hi(MachineInstr &MI,
                                     MachineBasicBlock *MBB,
                                     bool Unsigned) const {
  // ...
  int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
  // ...
  Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
  // ...
  MI.eraseFromParent();
  return JoinMBB;
}
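// Expand a subword ATOMIC_LOADW_* or ATOMIC_SWAPW pseudo (BinOpcode is the
// instruction that performs the operation, or 0 for a plain swap; Invert
// inverts the field afterwards, e.g. for NAND) into a load / rotate /
// operate / compare-and-swap loop on the containing aligned word.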
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadBinary(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned BinOpcode,
    bool Invert) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  DebugLoc DL = MI.getDebugLoc();

  // Get the right opcodes for the displacement.
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal        = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal        = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);

  // ...  (load the containing word and rotate the field into position)
  if (Invert) {
    // Perform the operation normally and then invert every bit of the field.
    Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
    BuildMI(MBB, DL, TII->get(BinOpcode), Tmp).addReg(RotatedOldVal).add(Src2);
    // XILF with the upper BitSize bits set.
    BuildMI(MBB, DL, TII->get(SystemZ::XILF), RotatedNewVal)
        .addReg(Tmp).addImm(-1U << (32 - BitSize));
  } else if (BinOpcode)
    // A simple binary operation.
    BuildMI(MBB, DL, TII->get(BinOpcode), RotatedNewVal)
        .addReg(RotatedOldVal)
        .add(Src2);
  // ...  (rotate the field back, CS the word, and loop on failure)

  MI.eraseFromParent();
  return DoneMBB;
}
// Implement EmitInstrWithCustomInserter for subword pseudo
// ATOMIC_LOADW_{MIN,MAX,UMIN,UMAX} instruction MI.  CompareOpcode compares
// the current field value with Src2, and KeepOldMask is the CC mask under
// which the old value should be kept.
MachineBasicBlock *SystemZTargetLowering::emitAtomicLoadMinMax(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned CompareOpcode,
    unsigned KeepOldMask) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register Src2 = MI.getOperand(3).getReg();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  DebugLoc DL = MI.getDebugLoc();

  // Get the right opcodes for the displacement.
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigVal       = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal        = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal        = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);

  // ...  (compare the rotated field against Src2, keep the old value when
  //       KeepOldMask matches, otherwise take the new one, then CS-loop)

  MI.eraseFromParent();
  return DoneMBB;
}
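// Expand a subword ATOMIC_CMP_SWAPW pseudo into a compare-and-swap loop on
// the containing aligned word, retrying if the surrounding bytes of the
// word changed concurrently.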
MachineBasicBlock *
SystemZTargetLowering::emitAtomicCmpSwapW(MachineInstr &MI,
                                          MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the operands.  Base can be a register or a frame index.
  Register Dest = MI.getOperand(0).getReg();
  MachineOperand Base = earlyUseOperand(MI.getOperand(1));
  int64_t Disp = MI.getOperand(2).getImm();
  Register CmpVal = MI.getOperand(3).getReg();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  DebugLoc DL = MI.getDebugLoc();

  const TargetRegisterClass *RC = &SystemZ::GR32BitRegClass;

  // Get the right opcodes for the displacement and zero-extension.
  unsigned LOpcode  = TII->getOpcodeForOffset(SystemZ::L,  Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");

  // Create virtual registers for temporary results.
  Register OrigOldVal  = MRI.createVirtualRegister(RC);
  Register OldVal      = MRI.createVirtualRegister(RC);
  Register SwapVal     = MRI.createVirtualRegister(RC);
  Register StoreVal    = MRI.createVirtualRegister(RC);
  Register OldValRot   = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);

  // ...  (rotate the field into position, compare with CmpVal, CS the word,
  //       and retry while the rest of the word keeps changing)

  // If the CC def wasn't dead in the ATOMIC_CMP_SWAPW, mark CC as live-in
  // to the block after the loop.
  if (!MI.registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))
    DoneMBB->addLiveIn(SystemZ::CC);

  MI.eraseFromParent();
  return DoneMBB;
}
// Emit a move from two GR64s to a GR128.
MachineBasicBlock *
SystemZTargetLowering::emitPair128(MachineInstr &MI,
                                   MachineBasicBlock *MBB) const {
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  const DebugLoc &DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::REG_SEQUENCE), Dest)
      .add(MI.getOperand(1))
      .addImm(SystemZ::subreg_h64)
      .add(MI.getOperand(2))
      .addImm(SystemZ::subreg_l64);
  MI.eraseFromParent();
  return MBB;
}
// Implement EmitInstrWithCustomInserter for pseudo AEXT128/ZEXT128
// instruction MI.  ClearEven is true if the top 64 bits of the result
// should be cleared.
MachineBasicBlock *SystemZTargetLowering::emitExt128(MachineInstr &MI,
                                                     MachineBasicBlock *MBB,
                                                     bool ClearEven) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register Dest = MI.getOperand(0).getReg();
  Register Src = MI.getOperand(1).getReg();
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);

  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::IMPLICIT_DEF), In128);
  if (ClearEven) {
    Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
    Register Zero64   = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);

    BuildMI(*MBB, MI, DL, TII->get(SystemZ::LLILL), Zero64)
        .addImm(0);
    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), NewIn128)
        .addReg(In128).addReg(Zero64).addImm(SystemZ::subreg_h64);
    In128 = NewIn128;
  }
  BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::INSERT_SUBREG), Dest)
      .addReg(In128).addReg(Src).addImm(SystemZ::subreg_l64);

  MI.eraseFromParent();
  return MBB;
}
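// Lower a memory-to-memory pseudo (MVC/NC/OC/XC/CLC, or an MVC-based
// memset) to real instructions: small constant lengths become one or a few
// straight-line operations, while larger or register-valued lengths are
// expanded into a loop handling 256 bytes per iteration plus a remainder.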
MachineBasicBlock *
SystemZTargetLowering::emitMemMemWrapper(MachineInstr &MI,
                                         MachineBasicBlock *MBB,
                                         unsigned Opcode,
                                         bool IsMemset) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  MachineOperand DestBase = earlyUseOperand(MI.getOperand(0));
  uint64_t DestDisp = MI.getOperand(1).getImm();
  MachineOperand SrcBase = MachineOperand::CreateReg(0U, false);
  uint64_t SrcDisp;

  // Fold a displacement that is out of range into the address register.
  auto foldDisplIfNeeded = [&](MachineOperand &Base, uint64_t &Disp) -> void {
    if (!isUInt<12>(Disp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
      BuildMI(*MI.getParent(), MI, DL, TII->get(Opcode), Reg)
          .add(Base).addImm(Disp).addReg(0);
      Base = MachineOperand::CreateReg(Reg, false);
      Disp = 0;
    }
  };

  if (!IsMemset) {
    SrcBase = earlyUseOperand(MI.getOperand(2));
    SrcDisp = MI.getOperand(3).getImm();
  } else {
    SrcBase = DestBase;
    SrcDisp = DestDisp++;
    foldDisplIfNeeded(DestBase, DestDisp);
  }

  MachineOperand &LengthMO = MI.getOperand(IsMemset ? 2 : 4);
  bool IsImmForm = LengthMO.isImm();
  bool IsRegForm = !IsImmForm;
  bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);

  // Build and insert one Opcode of Length, with special treatment for memset.
  auto insertMemMemOp = [&](MachineBasicBlock *InsMBB,
                            MachineBasicBlock::iterator InsPos,
                            MachineOperand DBase, uint64_t DDisp,
                            MachineOperand SBase, uint64_t SDisp,
                            unsigned Length) -> void {
    // ...
  };

  bool NeedsLoop = false;
  uint64_t ImmLength = 0;
  Register LenAdjReg = SystemZ::NoRegister;
  if (IsImmForm) {
    ImmLength = LengthMO.getImm();
    ImmLength += IsMemset ? 2 : 1; // Add back the subtracted adjustment.
    if (ImmLength == 0) {
      MI.eraseFromParent();
      return MBB;
    }
    if (Opcode == SystemZ::CLC) {
      if (ImmLength > 3 * 256)
        // A two-CLC sequence is a clear win over a loop, not least because
        // it needs only one branch.  A three-CLC sequence needs the same
        // number of branches as a loop, but is shorter.  We optimize for
        // the smallest number of branch instructions to avoid polluting
        // the prediction buffer too much.
        NeedsLoop = true;
    } else if (ImmLength > 6 * 256)
      // The heuristic we use is to prefer loops for anything that would
      // require 7 or more MVCs.  With these kinds of sizes there isn't much
      // to choose between straight-line code and looping code, since the
      // time will be dominated by the MVCs themselves.
      NeedsLoop = true;
  } else {
    NeedsLoop = true;
    LenAdjReg = LengthMO.getReg();
  }

  // When generating more than one CLC, all but the last will need to
  // branch to the end when a difference is found.
  MachineBasicBlock *EndMBB =
      (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
           ? splitBlockAfter(MI, MBB)
           : nullptr);

  if (NeedsLoop) {
    Register StartCountReg =
        MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
    if (IsImmForm)
      TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
    else {
      // ...  (compute the loop count from LenAdjReg at run time)
    }

    auto loadZeroAddress = [&]() -> MachineOperand {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      BuildMI(*MBB, MI, DL, TII->get(SystemZ::LGHI), Reg).addImm(0);
      return MachineOperand::CreateReg(Reg, false);
    };
    if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
      DestBase = loadZeroAddress();
    if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
      SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();

    Register StartSrcReg = forceReg(MI, SrcBase, TII);
    Register StartDestReg =
        (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));

    const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
    Register ThisSrcReg = MRI.createVirtualRegister(RC);
    Register ThisDestReg =
        (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
    Register NextSrcReg = MRI.createVirtualRegister(RC);
    Register NextDestReg =
        (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);

    // ...  (emit the loop blocks; a memset of a single byte takes a short
    //       path through MemsetOneCheckMBB, and the register form handles
    //       the remainder with an executed (EXRL) instruction)
  }

  // Handle any remaining bytes with straight-line code, doing 256 bytes at
  // a time.
  while (ImmLength > 0) {
    uint64_t ThisLength = std::min(ImmLength, uint64_t(256));
    // The previous iteration might have created out-of-range displacements.
    // Apply them using LA/LAY if so.
    foldDisplIfNeeded(DestBase, DestDisp);
    foldDisplIfNeeded(SrcBase, SrcDisp);
    insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
    // If there's another CLC to go, branch to the end if a difference was
    // found.
    if (EndMBB && ImmLength > 0) {
      // ...
    }
  }
  // ...
  MI.eraseFromParent();
  return MBB;
}
// Decompose string pseudo-instruction MI into a loop that continually
// performs Opcode until CC != 3.
MachineBasicBlock *SystemZTargetLowering::emitStringWrapper(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  MachineFunction &MF = *MBB->getParent();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  DebugLoc DL = MI.getDebugLoc();

  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();

  const TargetRegisterClass *RC = &SystemZ::GR64BitRegClass;
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  uint64_t End2Reg  = MRI.createVirtualRegister(RC);

  // ...  (loop: with R0L holding CharReg, perform CLST/MVST/SRST and branch
  //       back while CC == 3)

  MI.eraseFromParent();
  return DoneMBB;
}
// Update TBEGIN instruction with final opcode and register clobbers.
MachineBasicBlock *SystemZTargetLowering::emitTransactionBegin(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode,
    bool NoFloat) const {
  MachineFunction &MF = *MBB->getParent();
  const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();

  // Update opcode.
  MI.setDesc(TII->get(Opcode));

  // We cannot handle a TBEGIN that clobbers the stack or frame pointer.
  // Make sure to add the corresponding GRSM bits if they are missing.
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  if (TFI->hasFP(MF))
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);

  // Add GPR clobbers.
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      unsigned Reg = SystemZMC::GR64Regs[I];
      MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  // Add FPR/VR clobbers.
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      for (unsigned Reg : SystemZMC::VR128Regs)
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    } else {
      for (unsigned Reg : SystemZMC::FP64Regs)
        MI.addOperand(MachineOperand::CreateReg(Reg, true, true));
    }
  }

  return MBB;
}

MachineBasicBlock *SystemZTargetLowering::emitLoadAndTestCmp0(
    MachineInstr &MI, MachineBasicBlock *MBB, unsigned Opcode) const {
  // ...
  // Replace the pseudo with a normal load-and-test that models the def as
  // well.
  MI.eraseFromParent();
  return MBB;
}
MachineBasicBlock *SystemZTargetLowering::emitProbedAlloca(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  const unsigned ProbeSize = getStackProbeSize(MF);
  Register DstReg = MI.getOperand(0).getReg();
  Register SizeReg = MI.getOperand(2).getReg();

  // ...
  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  // ...  (loop: allocate and probe one ProbeSize page at a time, then
  //       allocate and probe the remaining tail)

  MI.eraseFromParent();
  return DoneMBB;
}
SDValue SystemZTargetLowering::
getBackchainAddress(SDValue SP, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  auto *TFL = Subtarget.getFrameLowering<SystemZELFFrameLowering>();
  SDLoc DL(SP);
  return DAG.getNode(ISD::ADD, DL, MVT::i64, SP,
                     DAG.getIntPtrConstant(TFL->getBackchainOffset(MF), DL));
}

MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  switch (MI.getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    return emitAdjCallStack(MI, MBB);
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::SCmp128Hi:
    return emitICmp128Hi(MI, MBB, false);
  case SystemZ::UCmp128Hi:
    return emitICmp128Hi(MI, MBB, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0);
  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);
  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);
  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);
  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, SystemZ::CCMASK_CMP_LE);
  case SystemZ::ATOMIC_LOADW_MAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CR, SystemZ::CCMASK_CMP_GE);
  case SystemZ::ATOMIC_LOADW_UMIN:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, SystemZ::CCMASK_CMP_LE);
  case SystemZ::ATOMIC_LOADW_UMAX:
    return emitAtomicLoadMinMax(MI, MBB, SystemZ::CLR, SystemZ::CCMASK_CMP_GE);

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCImm:
  case SystemZ::MVCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCImm:
  case SystemZ::XCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCImm:
  case SystemZ::CLCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::MemsetImmImm:
  case SystemZ::MemsetImmReg:
  case SystemZ::MemsetRegImm:
  case SystemZ::MemsetRegReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true/*IsMemset*/);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);
  case SystemZ::EH_SjLj_SetJmp:
    return emitEHSjLjSetJmp(MI, MBB);
  case SystemZ::EH_SjLj_LongJmp:
    return emitEHSjLjLongJmp(MI, MBB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, MBB);

  default:
    llvm_unreachable("Unexpected instr type to insert");
  }
}
const TargetRegisterClass *
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  return TargetLowering::getRepRegClassFor(VT);
}

SDValue SystemZTargetLowering::lowerGET_ROUNDING(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Save FPC to register.
  SDValue Chain = Op.getOperand(0);
  SDValue EFPC(
      DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
  Chain = EFPC.getValue(1);
  // ...  (extract the rounding-mode bits from the FPC value and return them
  //       together with the chain)
}
SDValue SystemZTargetLowering::lowerVECREDUCE_ADD(SDValue Op,
                                                  SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  EVT OpVT = Op.getValueType();

  assert(OpVT.isVector() && "Operand type for VECREDUCE_ADD is not a vector.");

  SDLoc DL(Op);
  // ...  (sum the vector elements and extract the scalar result)
}
// Only consider a function fully internal as long as it has local linkage
// and is only used by means of being the called function at call sites.
bool SystemZTargetLowering::isFullyInternal(const Function *Fn) const {
  if (!Fn->hasLocalLinkage())
    return false;
  for (const User *U : Fn->users()) {
    if (auto *CB = dyn_cast<CallBase>(U)) {
      if (CB->getCalledFunction() != Fn)
        return false;
    } else
      return false;
  }
  return true;
}
static void printFunctionArgExts(const Function *F, raw_fd_ostream &OS) {
  FunctionType *FT = F->getFunctionType();
  const AttributeList &Attrs = F->getAttributes();
  if (Attrs.hasRetAttrs())
    OS << Attrs.getAsString(AttributeList::ReturnIndex) << " ";
  OS << *F->getReturnType() << " @" << F->getName() << "(";
  for (unsigned I = 0, E = FT->getNumParams(); I != E; ++I) {
    if (I)
      OS << ", ";
    OS << *FT->getParamType(I);
    AttributeSet ArgAttrs = Attrs.getParamAttrs(I);
    for (auto A : {Attribute::SExt, Attribute::ZExt, Attribute::NoExt})
      if (ArgAttrs.hasAttribute(A))
        OS << " " << Attribute::getNameFromAttrKind(A);
  }
  OS << ")\n";
}
void SystemZTargetLowering::
verifyNarrowIntegerArgs_Call(const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const Function *F, SDValue Callee) const {
  bool IsInternal = false;
  const Function *CalleeFn = nullptr;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee))
    if ((CalleeFn = dyn_cast<Function>(G->getGlobal())))
      IsInternal = isFullyInternal(CalleeFn);
  if (!verifyNarrowIntegerArgs(Outs, IsInternal)) {
    errs() << "ERROR: Missing extension attribute of passed "
           << "value in call to function:\n" << "Callee:  ";
    if (CalleeFn != nullptr)
      printFunctionArgExts(CalleeFn, errs());
    else
      errs() << "-\n";
    errs() << "Caller:  ";
    printFunctionArgExts(F, errs());
    llvm_unreachable("");
  }
}
void SystemZTargetLowering::
verifyNarrowIntegerArgs_Ret(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            const Function *F) const {
  if (!verifyNarrowIntegerArgs(Outs, isFullyInternal(F))) {
    errs() << "ERROR: Missing extension attribute of returned "
           << "value from function:\n";
    printFunctionArgExts(F, errs());
    llvm_unreachable("");
  }
}
// Verify that narrow integer arguments are extended as required by the ABI.
// Return false if an error is found.
bool SystemZTargetLowering::
verifyNarrowIntegerArgs(const SmallVectorImpl<ISD::OutputArg> &Outs,
                        bool IsInternal) const {
  if (IsInternal || !Subtarget.isTargetELF())
    return true;

  // ...
  for (unsigned i = 0; i < Outs.size(); ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    if (VT.isInteger()) {
      assert((VT == MVT::i32 || VT.getSizeInBits() >= 64) &&
             "Unexpected integer argument VT.");
      if (VT == MVT::i32 &&
          !Flags.isSExt() && !Flags.isZExt())
        return false;
    }
  }

  return true;
}
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass into.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers, particular to z/OS in 64-bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
bool hasFP(const MachineFunction &MF) const
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
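Typical use is in a target's TargetLowering constructor; a hedged sketch (the specific choices are illustrative, not this target's actual configuration):
setOperationAction(ISD::SDIV,  MVT::i32, Expand);  // legalizer emits a replacement sequence
setOperationAction(ISD::CTPOP, MVT::i64, Custom);  // routed through LowerOperation
setOperationAction(ISD::ADD,   MVT::i64, Legal);   // natively supported, no action needed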
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
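A sketch of pointing a libcall at an OS-specific entry point (the symbol name is illustrative):
// Use a platform-specific memcpy symbol instead of the default "memcpy".
setLibcallName(RTLIB::MEMCPY, "__platform_memcpy");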
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
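A sketch: a target without a direct f64-to-f32 truncating store can have the legalizer split it.
// Expanded into FP_ROUND to f32 followed by a normal f32 store.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);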
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
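A sketch of declaring which extending loads exist in hardware (the choices are illustrative):
setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);   // native sign-extending load
setLoadExtAction(ISD::EXTLOAD,  MVT::i64, MVT::i1,  Promote); // no i1 loads; widen first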
virtual bool shouldSignExtendTypeInLibCall(Type *Ty, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
User * getUser() const
Returns the User that contains this Use.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
user_iterator user_begin()
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
constexpr ScalarTy getFixedValue() const
self_iterator getIterator()
A raw_ostream that writes to a file descriptor.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, val, ptr) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns the current rounding mode: -1 Undefined; 0 Round to 0; 1 Round to nearest, ties to even; 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
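A sketch of inverting a predicate, e.g. when a combine flips a branch:
// For integer i32, !(x < y) is (x >= y): SETLT becomes SETGE.
ISD::CondCode Inv = ISD::getSetCCInverse(ISD::SETLT, MVT::i32);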
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
ID ArrayRef< Type * > Tys
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
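A self-contained sketch, e.g. validating that an offset fits an unsigned 12-bit displacement field (the field width and function name are illustrative):
#include "llvm/Support/MathExtras.h"
#include <cstdint>

// True iff Offset is representable in 12 unsigned bits (0..4095).
bool fitsU12(uint64_t Offset) { return llvm::isUIntN(12, Offset); }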
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert ‘Bytes’ to a hex string and output to ‘OS’.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most significant, stopping at the first 1.
int countl_zero(T Val)
Count the number of 0's from the most significant bit to the least significant, stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
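A self-contained sketch combining this with countr_zero above: strength-reducing a multiply by a power of two to a shift (the function name is illustrative):
#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"

// For C a power of two, x * C == x << countr_zero(C).
bool getShiftForMul(uint32_t C, unsigned &ShAmt) {
  if (!llvm::isPowerOf2_32(C))
    return false;
  ShAmt = llvm::countr_zero(C);
  return true;
}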
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
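A self-contained sketch: decoding a signed 20-bit displacement field into a 64-bit integer (the field width and function name are illustrative):
#include "llvm/Support/MathExtras.h"
#include <cstdint>

// Interpret the low 20 bits of Raw as a signed value.
int64_t decodeDisp20(uint64_t Raw) { return llvm::SignExtend64<20>(Raw); }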
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
const uint32_t * getNoPreservedMask() const override
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})