#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"

      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
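// The Subtarget.has*() tests below are from the SystemZTargetLowering
// constructor: operation legality is keyed off subtarget facilities
// (high-word, vector, vector-enhancements-1/2, FP-extension, ...), so each
// block of setOperationAction calls is guarded by the matching predicate.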
  if (Subtarget.hasHighWord())

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVectorEnhancements1())

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVector())

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {

  if (Subtarget.hasPopulationCount())

  if (!Subtarget.hasFPExtension())

  if (Subtarget.hasFPExtension())

  if (Subtarget.hasFPExtension())

                     {MVT::i8, MVT::i16, MVT::i32}, Legal);

                     {MVT::i8, MVT::i16}, Legal);
  if (!Subtarget.hasFPExtension()) {

  if (Subtarget.hasMiscellaneousExtensions3()) {

      if (VT != MVT::v2i64)

      if (Subtarget.hasVectorEnhancements1())

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVectorEnhancements2()) {

  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {

    if (Subtarget.hasFPExtension()) {

  if (Subtarget.hasFPExtension()) {

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVectorEnhancements1()) {

  if (Subtarget.hasVectorEnhancements1()) {

  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {

    if (!Subtarget.hasVectorEnhancements1()) {

    if (Subtarget.hasVectorEnhancements1())

  if (Subtarget.hasVectorEnhancements1()) {

  if (!Subtarget.hasVector()) {

  struct RTLibCallMapping {

  static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
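  // X-macro pattern: each HANDLE_LIBCALL(code, name) entry in
  // ZOSLibcallNames.def expands to an {RTLIB::code, name} initializer, so
  // this table stays in sync with the .def file automatically.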
  for (auto &E : RTLibCallCommon)

  return Subtarget.hasSoftFloat();

  return Subtarget.hasVectorEnhancements1();

  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))

  if (SplatBitSize > 64)

  if (isInt<16>(SignedValue)) {

    if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {

  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);

  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
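// The idea above: undefined splat bits may be set either way when looking for
// a legal vector constant, so the code first tries filling the trailing
// (Lower) and leading (Upper) undef runs, and finally sets the remaining
// "middle" undef bits and retries via tryValue().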
    unsigned HalfSize = Width / 2;

    if (HighValue != LowValue || 8 > HalfSize)

    SplatBits = HighValue;

  SplatBitSize = Width;

      BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,

      BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,

                                       bool ForCodeSize) const {

  if (Imm.isZero() || Imm.isNegZero())

  if (Subtarget.hasInterlockedAccess1() &&

  return isInt<32>(Imm) || isUInt<32>(Imm);

  return isUInt<32>(Imm) || isUInt<32>(-Imm);
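// The two range checks above cover the immediate forms of the compare and
// add instructions: a signed or unsigned 32-bit value for compares
// (presumably CGFI/CLGFI) and an unsigned 32-bit value in either direction
// for adds (presumably ALGFI/SLGFI with -Imm).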
      LongDisplacement(LongDispl), IndexReg(IdxReg) {}

    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:

  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())

  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());

    bool IsVectorAccess = MemAccessTy->isVectorTy();

    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;

    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;

    if (IsFPAccess || IsVectorAccess)

  return AM.Scale == 0;
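// findOptimalMemOpLowering below keeps small memcpy/memset operations (up to
// MVCFastLen bytes) out of the generic scalar load/store expansion, likely so
// the MVC-based inline expansion can handle them in a single sequence.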
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())

                                                  SrcAS, FuncAttributes);

  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;

  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())

  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();

  return FromBits > ToBits;

  return FromBits > ToBits;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {

                                         const char *constraint) const {

  Value *CallOperandVal = info.CallOperandVal;

  if (!CallOperandVal)

  switch (*constraint) {

    if (Subtarget.hasVector())

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)

static std::pair<unsigned, const TargetRegisterClass *>
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
    return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
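// Inline-asm constraints of the form {r<N>}, {f<N>} or {v<N>} are resolved by
// parsing the register number out of the braces and indexing a per-class map
// from that number to the physical register; {0U, nullptr} is returned when
// the number is out of range.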
std::pair<unsigned, const TargetRegisterClass *>

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

      if (Subtarget.hasVector()) {
        return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);

  auto getVTSizeInBits = [&VT]() {

    if (Constraint[1] == 'r') {
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'f') {
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 64)

                                        const Constant *PersonalityFn) const {

                                        const Constant *PersonalityFn) const {

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
                                              Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                                              Op.getValueType()));
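// TableGen-generated calling-convention implementation (CC_SystemZ,
// RetCC_SystemZ, ...) used by the argument and return lowering below.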
#include "SystemZGenCallingConv.inc"

static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,

                                               Type *ToType) const {

  if (BitCastToType == MVT::v2i64)

                                 MVT::Untyped, Hi, Lo);

    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {

    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {

  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;

      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,

      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;

    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();

      int64_t RegSaveOffset =

                                     &SystemZ::FP64BitRegClass);

    MRI.addLiveIn(Regs->getADARegister(), ADAvReg);

  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())

                           unsigned Offset, bool LoadAdr = false) {

  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);

  unsigned ADADelta = 0;
  unsigned EPADelta = 8;

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {

  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    unsigned ArgIndex = Outs[I].OrigArgIndex;

    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
        SlotVT = Outs[I].VT;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
                SlotVT.getStoreSize()) && "Not enough space for argument part!");
      ArgValue = SpillSlot;
  if (!StackPtr.getNode())

    RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));

  if (!MemOpChains.empty())

          ->getAddressOfCalleeRegister();
    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));

  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);

                                      VA.getLocVT(), Glue);

                     bool DoesNotReturn, bool IsReturnValueUsed) const {

  Args.reserve(Ops.size());

    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Entry);

    for (auto &Out : Outs)
      if (Out.ArgVT == MVT::i128)

  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);

  if (RetLocs.empty())

  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                                 unsigned &CCValid) {
  unsigned Id = Op.getConstantOperandVal(1);

  case Intrinsic::s390_tbegin:
  case Intrinsic::s390_tbegin_nofloat:
  case Intrinsic::s390_tend:

  unsigned Id = Op.getConstantOperandVal(0);

  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:

  case Intrinsic::s390_vtm:

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:

  case Intrinsic::s390_tdc:

  for (unsigned I = 2; I < NumOps; ++I)

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

  return Intr.getNode();

  for (unsigned I = 1; I < NumOps; ++I)

  return Intr.getNode();

  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
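// The macro above expands to three cases per condition: the plain ISD::SETcc,
// its ordered variant ISD::SETOcc, and the unordered ISD::SETUcc, which also
// ORs in CCMASK_CMP_UO. E.g. for EQ it handles SETEQ, SETOEQ and SETUEQ.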
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  int64_t Value = ConstOp1->getSExtValue();

  if (!C.Op0.hasOneUse() ||

  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())

  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  uint64_t Mask = (1 << NumBits) - 1;

    int64_t SignedValue = ConstOp1->getSExtValue();

  } else if (NumBits == 8) {

  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlign(),
                           Load->getMemOperand()->getFlags());

  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())

  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());

  if (Load->getMemoryVT() == MVT::i8)

  switch (Load->getExtensionType()) {

  if (C.Op0.getValueType() == MVT::i128)
  if (C.Op0.getValueType() == MVT::f128)

  if (isa<ConstantFPSDNode>(C.Op1))

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)

      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))

  unsigned Opcode0 = C.Op0.getOpcode();

      C.Op0.getConstantOperandVal(1) == 0xffffffff)

      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {

    Flags.setNoSignedWrap(false);
    Flags.setNoUnsignedWrap(false);

  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {

  if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
          cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {

      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
        C.Op0 = C.Op0.getOperand(0);

  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
                              unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {

  if (CmpVal == Mask) {

  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {

  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {

  if (C.Op0.getValueType() == MVT::i128) {
    auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
    if (Mask && Mask->getAPIntValue() == 0) {

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);

  uint64_t CmpVal = ConstOp1->getZExtValue();

    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    MaskVal = Mask->getZExtValue();

    if (NewC.Op0.getValueType() != MVT::i64 ||

      MaskVal = -(CmpVal & -CmpVal);

  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                        MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
             NewC.Op0.getOpcode() == ISD::SRL &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                        MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;

  if (Mask && Mask->getZExtValue() == MaskVal)

  C.CCMask = NewCCMask;

  if (C.Op0.getValueType() != MVT::i128)

  bool Swap = false, Invert = false;

    C.CCMask ^= C.CCValid;

  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask || Mask->getValueSizeInBits(0) > 64)

  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())

  C.Op0 = C.Op0.getOperand(0);

  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
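// Condition-code values 0..3 map to bits 3..0 of a 4-bit mask: a raw CC of 1
// selects mask bit 1 << (3 - 1) = 0b0100, while the inequality forms build
// contiguous runs of mask bits (~0U << (4 - CC), etc.) before the final
// masking with CCValid.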
                       bool IsSignaling = false) {

  unsigned Opcode, CCValid;

  Comparison C(CmpOp0, CmpOp1, Chain);

  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)

    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;

  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {

    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);

  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);

                         unsigned CCValid, unsigned CCMask) {

    case CmpMode::Int: return 0;
    case CmpMode::FP: return 0;
    case CmpMode::StrictFP: return 0;
    case CmpMode::SignalingFP: return 0;

  int Mask[] = { Start, -1, Start + 1, -1 };

      !Subtarget.hasVectorEnhancements1()) {

    SDValue Ops[2] = { Res, NewChain };

    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);

                                          bool IsSignaling) const {

  assert (!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;

    assert(IsFP && "Unexpected integer comparison");
                             DL, VT, CmpOp1, CmpOp0, Chain);
                             DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GE.getValue(1));

    assert(IsFP && "Unexpected integer comparison");
                             DL, VT, CmpOp1, CmpOp0, Chain);
                             DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GT.getValue(1));

      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);

    Chain = Cmp.getValue(1);

  if (Chain && Chain.getNode() != Cmp.getNode()) {

  EVT VT = Op.getValueType();
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

                                                  bool IsSignaling) const {

  EVT VT = Op.getNode()->getValueType(0);

    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {

  SDValue Ops[] = {TrueOp, FalseOp,

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);

                         Node->getValueType(0),

  assert(Mask && "Missing call preserved mask for calling convention");

  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);

SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,

    SDValue TP = lowerThreadPointer(DL, DAG);

  if (CP->isMachineConstantPoolEntry())

  unsigned Depth = Op.getConstantOperandVal(0);

    int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);

  unsigned Depth = Op.getConstantOperandVal(0);

  SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
  int Offset = TFL->getReturnAddressOffset(MF);

                                 &SystemZ::GR64BitRegClass);

  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
                         LoadN->getBasePtr(), LoadN->getMemOperand());

  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    if (Subtarget.hasHighWord()) {
                                 MVT::i64, SDValue(U64, 0), In);
                        DL, MVT::f32, Out64);
  if (InVT == MVT::f32 && ResVT == MVT::i32) {
                               MVT::f64, SDValue(U64, 0), In);
    if (Subtarget.hasHighWord())

    return lowerVASTART_XPLINK(Op, DAG);
  return lowerVASTART_ELF(Op, DAG);

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const unsigned NumFields = 4;

  for (unsigned I = 0; I < NumFields; ++I) {
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,

  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
                       Align(8), false, false,

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,

  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  if (ExtraAlignSpace)

  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();

  Register SPReg = Regs.getStackPointerRegister();
  if (ExtraAlignSpace) {

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,

  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),

  if (ExtraAlignSpace)

                DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);

  if (RequiredAlign > StackAlign) {

    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),

SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(

  EVT VT = Op.getValueType();

                       Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
                       Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

                     LL, RL, Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

                       Op.getOperand(1), Ops[1], Ops[0]);
                       Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

  EVT VT = Op.getValueType();

                     Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};

  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)

  if (!isInt<16>(Value))

                     MVT::i64, HighOp, Low32);

  if (N->getValueType(0) == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)

  MVT VT = N->getSimpleValueType(0);

  if (VT == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;

  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)

  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);

  if (NumSignificantBits == 0)

  BitSize = std::min(BitSize, OrigBitSize);

  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {

  if (BitSize != OrigBitSize)

  auto *Node = cast<AtomicSDNode>(Op.getNode());
         (Node->getMemoryVT() == MVT::i128 || Node->getMemoryVT() == MVT::f128) &&
         "Only custom lowering i128 or f128.");

  EVT PtrVT = Addr.getValueType();
  EVT WideVT = MVT::i32;

                                                  unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)

  if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {

  SDValue AlignedAddr, BitShift, NegBitShift;

  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,

  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {
    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    assert(Subtarget.hasInterlockedAccess1() &&
           "Should have been expanded by AtomicExpand pass.");
                       Node->getChain(), Node->getBasePtr(), NegSrc2,
                       Node->getMemOperand());
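  // ATOMIC_LOAD_SUB is handled by negating the operand (NegSrc2) and reusing
  // the atomic load-add path; the interlocked-access-1 guard suggests this
  // ultimately maps to the z load-and-add instructions (LAA/LAAG).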
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  if (Node->getMemoryVT() == MVT::i128) {

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
                                 DL, Tys, Ops, NarrowVT, MMO);

  SDValue AlignedAddr, BitShift, NegBitShift;

  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                                       VTList, Ops, NarrowVT, MMO);

SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {

  if (auto *SI = dyn_cast<StoreInst>(&I))
  if (auto *LI = dyn_cast<LoadInst>(&I))
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))

         "in GHC calling convention");
                            Regs->getStackPointerRegister(), Op.getValueType());

         "in GHC calling convention");

  if (StoreBackchain) {
                                     Chain, DL, Regs->getStackPointerRegister(), MVT::i64);
    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),

  Chain = DAG.getCopyToReg(Chain, DL, Regs->getStackPointerRegister(), NewSP);

    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),

  bool IsData = Op.getConstantOperandVal(4);
    return Op.getOperand(0);

  bool IsWrite = Op.getConstantOperandVal(2);

  auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
                                 Node->getVTList(), Ops,
                                 Node->getMemoryVT(), Node->getMemOperand());

SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
  unsigned Opcode, CCValid;
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
  unsigned Opcode, CCValid;
    if (Op->getNumValues() == 1)
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");

  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);
  case Intrinsic::s390_vpdi:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::s390_vaq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vaccb:
  case Intrinsic::s390_vacch:
  case Intrinsic::s390_vaccf:
  case Intrinsic::s390_vaccg:
  case Intrinsic::s390_vaccq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vacq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vacccq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vsq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vscbib:
  case Intrinsic::s390_vscbih:
  case Intrinsic::s390_vscbif:
  case Intrinsic::s390_vscbig:
  case Intrinsic::s390_vscbiq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vsbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vsbcbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
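// In these permute patterns, byte indices 0-15 select from the first source
// vector and 16-31 from the second; each row is the byte selection performed
// by one fixed shuffle instruction (merge high/low, pack, etc.).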
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];

                        unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };

      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;

                         unsigned &OpNo0, unsigned &OpNo1) {

    int Elt = Bytes[From];
      Transform[From] = -1;
      while (P.Bytes[To] != Elt) {
      Transform[From] = To;

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;

      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;

                            unsigned BytesPerElement, int &Base) {

  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
        if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
      } else if (unsigned(Base) != Elem - I)

                         unsigned &StartIndex, unsigned &OpNo0,
  int OpNos[] = { -1, -1 };

  for (unsigned I = 0; I < 16; ++I) {
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;

    N = N->getOperand(0);

  if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
    return Op->getZExtValue() == 0;

  for (unsigned I = 0; I < Num; I++)

  for (unsigned I = 0; I < 2; ++I)

  unsigned StartIndex, OpNo0, OpNo1;

  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
      if (OpNo == ZeroVecIdx && I == 0) {
      if (OpNo != ZeroVecIdx && Byte == 0) {
      if (ZeroIdx != -1) {
        if (Bytes[I] >= 0) {
          if (OpNo == ZeroVecIdx)
        SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];

                     (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);

struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}

  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }

  unsigned UnpackFromEltSize;

void GeneralShuffle::addUndef() {
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
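// GeneralShuffle models an arbitrary shuffle as a flat list of byte sources:
// each result byte in Bytes[] is either -1 (undef) or, in the full
// implementation, an operand number scaled by the 16-byte vector width plus a
// byte offset into that operand. add() appends one element's worth of bytes.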
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {

  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;

  if (FromBytesPerElement < BytesPerElement)
           (FromBytesPerElement - BytesPerElement));

  while (Op.getNode()) {
      Op = Op.getOperand(0);
    } else if (Op.isUndef()) {

  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
  if (OpNo == Ops.size())

  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);

  if (Ops.size() == 0)

  tryPrepareForUnpack();

  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };

          else if (OpNo == I + Stride)

        if (NewBytes[J] >= 0) {
                 "Invalid double permute");
          assert(NewBytesMap[J] < 0 && "Invalid double permute");
          if (NewBytes[J] >= 0)

    Ops[1] = Ops[Stride];

  unsigned OpNo0, OpNo1;
  if (unpackWasPrepared() && Ops[1].isUndef())
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))

  Op = insertUnpackIfPrepared(DAG, DL, Op);

  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";

void GeneralShuffle::tryPrepareForUnpack() {
  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)

  if (Ops.size() > 2 &&

  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;
      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (Bytes[Elt] != -1) {
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;
    if (Ops.size() == 2) {
        if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
          UnpackFromEltSize = UINT_MAX;
  if (UnpackFromEltSize > 4)

  LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
                    << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
             dumpBytes(Bytes, "Original Bytes vector:"););

    Elt += UnpackFromEltSize;
    for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
      Bytes[B] = Bytes[Elt];

  Ops.erase(&Ops[ZeroVecOpNo]);
    if (Bytes[I] >= 0) {
      if (OpNo > ZeroVecOpNo)

  if (!unpackWasPrepared())
  unsigned InBits = UnpackFromEltSize * 8;
  unsigned OutBits = InBits * 2;

    if (!Op.getOperand(I).isUndef())

  if (Value.isUndef())

  GeneralShuffle GS(VT);

  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
      Op = Op.getOperand(0);
      unsigned Elem = Op.getConstantOperandVal(1);
      if (!GS.add(Op.getOperand(0), Elem))
    } else if (Op.isUndef()) {

  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {

  return GS.getNode(DAG, SDLoc(BVN));
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
  if (auto *AL = dyn_cast<AtomicSDNode>(Op))

  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      else if (Elem != Single) {

  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))

  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {

  if (VT == MVT::v2i64 && !AllLoads)

  if (VT == MVT::v2f64 && !AllLoads)

  if (VT == MVT::v4f32 && !AllLoads) {
                               DL, MVT::v2i64, Op01, Op23);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {

  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)

  std::map<const SDNode*, unsigned> UseCounts;
  SDNode *LoadMaxUses = nullptr;
  for (unsigned I = 0; I < NumElements; ++I)
    if (isVectorElementLoad(Elems[I])) {
      SDNode *Ld = Elems[I].getNode();
      if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
  if (LoadMaxUses != nullptr) {
    ReplicatedVal = SDValue(LoadMaxUses, 0);

    unsigned I1 = NumElements / 2 - 1;
    unsigned I2 = NumElements - 1;
    bool Def1 = !Elems[I1].isUndef();
    bool Def2 = !Elems[I2].isUndef();

  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)

  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());

  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {

  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);

  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());

  EVT VT = Op.getValueType();

  if (VSN->isSplat()) {
    unsigned Index = VSN->getSplatIndex();
           "Splat index should be defined and in first operand");

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))

  return GS.getNode(DAG, SDLoc(VSN));

  EVT VT = Op.getValueType();

  if (VT == MVT::v2f64 &&

SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,

  EVT VT = Op.getValueType();

  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {

SDValue SystemZTargetLowering::

  EVT OutVT = Op.getValueType();

  } while (FromBits != ToBits);

SDValue SystemZTargetLowering::

  EVT OutVT = Op.getValueType();

  unsigned NumInPerOut = InNumElts / OutNumElts;

  unsigned ZeroVecElt = InNumElts;
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;
    Mask[MaskElt] = PackedElt;
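// The mask built above implements zero-extension by packing: for each output
// element, all but the last input position select elements of the zero
// vector, and the final position selects the actual packed element.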
                                          unsigned ByScalar) const {

  EVT VT = Op.getValueType();

  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;

    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      unsigned Index = VSN->getSplatIndex();
             "Splat index should be defined and in first operand");
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

  MVT ResultVT = Op.getSimpleValueType();

  unsigned Check = Op.getConstantOperandVal(1);

  unsigned TDCMask = 0;

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);
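// LowerOperation: central dispatch for every custom-lowered opcode; each case
// simply forwards to the dedicated lower* routine defined above.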
  switch (Op.getOpcode()) {
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerBR_CC(Op, DAG);
    return lowerSELECT_CC(Op, DAG);
    return lowerSETCC(Op, DAG);
    return lowerSTRICT_FSETCC(Op, DAG, false);
    return lowerSTRICT_FSETCC(Op, DAG, true);
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
    return lowerBITCAST(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerVACOPY(Op, DAG);
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
    return lowerSMUL_LOHI(Op, DAG);
    return lowerUMUL_LOHI(Op, DAG);
    return lowerSDIVREM(Op, DAG);
    return lowerUDIVREM(Op, DAG);
    return lowerXALUO(Op, DAG);
    return lowerUADDSUBO_CARRY(Op, DAG);
    return lowerOR(Op, DAG);
    return lowerCTPOP(Op, DAG);
    return lowerVECREDUCE_ADD(Op, DAG);
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerATOMIC_LDST_I128(Op, DAG);
    return lowerATOMIC_LOAD_SUB(Op, DAG);
    return lowerATOMIC_CMP_SWAP(Op, DAG);
    return lowerSTACKSAVE(Op, DAG);
    return lowerSTACKRESTORE(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerIS_FPCLASS(Op, DAG);
    return lowerGET_ROUNDING(Op, DAG);
    return lowerREADCYCLECOUNTER(Op, DAG);
                                           &SystemZ::FP128BitRegClass);

                     SystemZ::REG_SEQUENCE, SL, MVT::f128,

                                           &SystemZ::FP128BitRegClass);

  switch (N->getOpcode()) {

    SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
                           DL, Tys, Ops, MVT::i128, MMO);
    if (N->getValueType(0) == MVT::f128)

    SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2) };
                           DL, Tys, Ops, MVT::i128, MMO);
    if (cast<AtomicSDNode>(N)->getSuccessOrdering() ==
                                    MVT::Other, Res), 0);

    SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
                           DL, Tys, Ops, MVT::i128, MMO);

  if (N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&

#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
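// OPCODE(NAME) stringizes each SystemZISD opcode for the target node name
// lookup, e.g. OPCODE(ATOMIC_CMP_SWAPW) returns "SystemZISD::ATOMIC_CMP_SWAPW".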
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())

                                                  DAGCombinerInfo &DCI,

  unsigned Opcode = Op.getOpcode();
    Op = Op.getOperand(0);

        canTreatAsByteVector(Op.getValueType())) {
                               BytesPerElement, First))
      if (Byte % BytesPerElement != 0)
        Index = Byte / BytesPerElement;

               canTreatAsByteVector(Op.getValueType())) {
      EVT OpVT = Op.getValueType();
      if (OpBytesPerElement < BytesPerElement)
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        DCI.AddToWorklist(Op.getNode());
        DCI.AddToWorklist(Op.getNode());

               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
      Op = Op.getOperand(0);

  if (Op.getValueType() != VecVT) {
    DCI.AddToWorklist(Op.getNode());

SDValue SystemZTargetLowering::combineTruncateExtract(

  if (canTreatAsByteVector(VecVT)) {
    if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (BytesPerElement % TruncBytes == 0) {
        unsigned Scale = BytesPerElement / TruncBytes;
        unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
        EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
        return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);

    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
      DCI.CombineTo(N0.getNode(), TruncSelect);

SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();

SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);

    auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
        unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
        unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;

SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {

  unsigned Opcode = N->getOpcode();
    if (Op1 == N->getOperand(0))
    if (ElemBytes <= 4) {
      DCI.AddToWorklist(Op1.getNode());
      DCI.AddToWorklist(Op.getNode());

  LoPart = HiPart = nullptr;
       UI != UIEnd; ++UI) {
    if (UI.getUse().getResNo() != 0)
    bool IsLoPart = true;

  LoPart = HiPart = nullptr;
       UI != UIEnd; ++UI) {
    if (UI.getUse().getResNo() != 0)
        User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
    switch (User->getConstantOperandVal(1)) {
    case SystemZ::subreg_l64:
    case SystemZ::subreg_h64:

SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT LdVT = N->getValueType(0);

                  LD->getPointerInfo(), LD->getOriginalAlign(),
                  LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(HiPart, EltLoad, true);

                  LD->getPointerInfo().getWithOffset(8), LD->getOriginalAlign(),
                  LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(LoPart, EltLoad, true);

    DCI.AddToWorklist(Chain.getNode());

    else if (UI.getUse().getResNo() == 0)
  if (!Replicate || OtherUses.empty())

  for (SDNode *U : OtherUses) {

bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue;
    if ((unsigned) M[i] != NumElts - 1 - i)

  for (auto *U : StoredVal->uses()) {
      EVT CurrMemVT = ST->getMemoryVT().getScalarType();
    } else if (isa<BuildVectorSDNode>(U)) {
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {

  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();

  if (MemVT.isInteger() && SN->isTruncatingStore()) {
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      N->getOperand(0), BSwapOp, N->getOperand(2)
                                   Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      Subtarget.hasVectorEnhancements2()) {
                                   Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      N->getOperand(0).reachesChainWithoutSideEffects(SDValue(Op1.getNode(), 1))) {
                                   Ops, MemVT, SN->getMemOperand());

        DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(),
                     SN->getPointerInfo(), SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());
                     SN->getPointerInfo().getWithOffset(8),
                     SN->getOriginalAlign(),
                     SN->getMemOperand()->getFlags(), SN->getAAInfo());

    if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() ||
    if (VCI.isVectorConstantLegal(Subtarget) &&

    auto FindReplicatedReg = [&](SDValue MulOp) {
      EVT MulVT = MulOp.getValueType();
      if (MulOp->getOpcode() == ISD::MUL &&
          (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
          WordVT = LHS->getOperand(0).getValueType();
          WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT();
        if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
              APInt(MulVT.getSizeInBits(), C->getZExtValue()));
          if (VCI.isVectorConstantLegal(Subtarget) &&
              WordVT == VCI.VecVT.getScalarType())

    if (isa<BuildVectorSDNode>(Op1) &&
      if (auto *C = dyn_cast<ConstantSDNode>(SplatVal))
        FindReplicatedReg(SplatVal);
    if (auto *C = dyn_cast<ConstantSDNode>(Op1))
      FindReplicatedReg(Op1);

           "Bad type handling");
                        SN->getBasePtr(), SN->getMemOperand());
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {

      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

    DCI.CombineTo(N, ESLoad);
    DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));

SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())

      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

    EVT VecVT = Op.getValueType();
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());

  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
      return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                            IndexN->getZExtValue(), DCI, false);

SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (N->getOperand(0) == N->getOperand(1))

  if (Chain1 == Chain2)

SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())

  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;

  if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() &&
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 1) {
        if (OtherRound.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                                 {MVT::v4f32, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VRound.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                               N->getVTList(), Extract0, Chain);

SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())

  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;

  if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() &&
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 2) {
        if (OtherExtend.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                                 {MVT::v2f64, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VExtend.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                               N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {

  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);

  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();

  if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits &&
      OutScalarBits <= 64) {
    unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
    unsigned ExtOpcode =

SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {

      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {

    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

    if (N->getValueType(0) == MVT::i16)

    DCI.CombineTo(N, ResVal);
    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
      Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

      (canLoadStoreByteSwapped(N->getValueType(0)) &&
    EVT VecVT = N->getValueType(0);
    EVT EltVT = N->getValueType(0).getVectorElementType();
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());

    if (SV && Op.hasOneUse()) {
      EVT VecVT = N->getValueType(0);
      DCI.AddToWorklist(Op0.getNode());
      DCI.AddToWorklist(Op1.getNode());
      DCI.AddToWorklist(Op0.getNode());
      DCI.AddToWorklist(Op1.getNode());

  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));

  bool Invert = false;

    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())

    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();

  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
    auto *SHL = CompareLHS->getOperand(0).getNode();
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    auto *IPM = SHL->getOperand(0).getNode();

    if (!CompareLHS->hasOneUse())
    if (CompareRHS->getZExtValue() != 0)

    CCReg = IPM->getOperand(0);
SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
      N->getOperand(3), CCReg);
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
      N->getOperand(0), N->getOperand(1),
SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();
  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
    SelectCCMaskVal ^= SelectCCValidVal;
  if (SelectCCValidVal & ~CCValidVal)
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
  return Select->getOperand(4);
SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  unsigned Id = N->getConstantOperandVal(1);
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
  return N->getOperand(0);
  switch (N->getOpcode()) {
  case ISD::UREM:
    return combineIntDIVREM(N, DCI);
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    SrcDemE = DemandedElts;
    SrcDemE = SrcDemE.trunc(NumElts / 2);
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vpdi: {
    SrcDemE = APInt(NumElts, 0);
    if (!DemandedElts[OpNo - 1])
    unsigned Mask = Op.getConstantOperandVal(3);
    unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
    SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
  case Intrinsic::s390_vsldb: {
    assert(VT == MVT::v16i8 && "Unexpected type.");
    unsigned FirstIdx = Op.getConstantOperandVal(3);
    assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
    unsigned NumSrc0Els = 16 - FirstIdx;
    SrcDemE = APInt(NumElts, 0);
    APInt DemEls = DemandedElts.trunc(NumSrc0Els);
    APInt DemEls = DemandedElts.lshr(NumSrc0Els);
  case Intrinsic::s390_vperm:
    SrcDemE = APInt(NumElts, -1);
    SrcDemE = APInt(1, 1);
    SrcDemE = DemandedElts;
    const APInt &DemandedElts,
    const APInt &DemandedElts,
    unsigned Depth) const {
  unsigned tmp0, tmp1;
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
         "KnownBits does not match VT in bitwidth");
         "DemandedElts does not match VT number of elements");
  unsigned Opcode = Op.getOpcode();
  bool IsLogical = false;
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {

  if (LHS == 1)
    return 1;
  if (RHS == 1)
    return 1;
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  if (SrcBitWidth > VTBits) {
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
    unsigned Depth) const {
  if (Op.getResNo() != 0)
  unsigned Opcode = Op.getOpcode();
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
  EVT VT = Op.getValueType();

  switch (Op->getOpcode()) {

         "Unexpected stack alignment");
  unsigned StackProbeSize =
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
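The probe-size computation above rounds StackProbeSize down to a multiple of the stack alignment with the usual power-of-two mask trick. A minimal standalone C++ sketch of just that step (the values below are illustrative, not taken from the backend):

// Align-down via bit masking; Align must be a power of two.
#include <cassert>
#include <cstdint>
#include <cstdio>

static uint64_t alignDown(uint64_t Size, uint64_t Align) {
  assert((Align & (Align - 1)) == 0 && "alignment must be a power of two");
  return Size & ~(Align - 1); // clear the low log2(Align) bits
}

int main() {
  // An odd request like 4100 bytes rounds down to 4096 for an 8-byte alignment.
  std::printf("%llu\n", (unsigned long long)alignDown(4100, 8)); // prints 4096
  return 0;
}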
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);

    if (Succ->isLiveIn(SystemZ::CC))

  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
  for (auto *MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
    if (RegRewriteTable.contains(TrueReg))
      TrueReg = RegRewriteTable[TrueReg].first;
    if (RegRewriteTable.contains(FalseReg))
      FalseReg = RegRewriteTable[FalseReg].second;
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  assert(TFL->hasReservedCallFrame(MF) &&
         "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
  uint32_t NumBytes = MI.getOperand(0).getImm();
  MI.eraseFromParent();

  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
  assert(NextMI.getOperand(3).getImm() == CCValid &&
         "Bad CCValid operands since CC was not redefined.");
  if (NextMI.getOperand(4).getImm() == CCMask ||
      NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
  if (NextMI.definesRegister(SystemZ::CC, nullptr) ||
      NextMI.usesCustomInsertionHook())
  for (auto *SelMI : Selects)
    if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
  if (NextMI.isDebugInstr()) {
    assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
  } else if (User || ++Count > 20)
  bool CCKilled = (LastMI->killsRegister(SystemZ::CC, nullptr) ||
  for (auto *SelMI : Selects)
    SelMI->eraseFromParent();
  for (auto *DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);
                                     unsigned StoreOpcode,
                                     unsigned STOCOpcode,
                                     bool Invert) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
  for (auto *I : MI.memoperands())
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
  MI.eraseFromParent();

  if (!MI.killsRegister(SystemZ::CC, nullptr) &&
  MI.eraseFromParent();

  int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
  Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
  MI.eraseFromParent();
                                           bool Invert) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  } else if (BinOpcode)
  MI.eraseFromParent();

                                           unsigned KeepOldMask) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  MI.eraseFromParent();
  int64_t Disp = MI.getOperand(2).getImm();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);
  if (!MI.registerDefIsDead(SystemZ::CC, nullptr))
  MI.eraseFromParent();
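emitAtomicCmpSwapW builds a retry loop around the full-word CS instruction so that a byte- or halfword-sized compare-and-swap can be performed on the containing aligned word. A conceptual C++ sketch of the same retry structure using std::atomic; this illustrates the control flow only, not the emitted machine instructions:

// Hypothetical helper: CAS one byte inside an aligned 32-bit word.
#include <atomic>
#include <cstdint>

bool casByteInWord(std::atomic<uint32_t> &Word, unsigned Shift,
                   uint8_t Expected, uint8_t Desired) {
  uint32_t Old = Word.load();
  for (;;) {
    if (uint8_t((Old >> Shift) & 0xff) != Expected)
      return false; // the addressed byte no longer matches
    uint32_t New = (Old & ~(0xffu << Shift)) | (uint32_t(Desired) << Shift);
    // On failure compare_exchange_strong reloads Old with the current value,
    // mirroring how CS supplies the old value for the next retry iteration.
    if (Word.compare_exchange_strong(Old, New))
      return true;
  }
}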
      .add(MI.getOperand(1))
      .addImm(SystemZ::subreg_h64)
      .add(MI.getOperand(2))
      .addImm(SystemZ::subreg_l64);
  MI.eraseFromParent();

                                       bool ClearEven) const {
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
  MI.eraseFromParent();
                                        unsigned Opcode, bool IsMemset) const {
  uint64_t DestDisp = MI.getOperand(1).getImm();
  if (!isUInt<12>(Disp)) {
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
    SrcDisp = MI.getOperand(3).getImm();
    SrcDisp = DestDisp++;
  foldDisplIfNeeded(DestBase, DestDisp);
  bool IsImmForm = LengthMO.isImm();
  bool IsRegForm = !IsImmForm;
                          unsigned Length) -> void {
  bool NeedsLoop = false;
  Register LenAdjReg = SystemZ::NoRegister;
    ImmLength = LengthMO.getImm();
    ImmLength += IsMemset ? 2 : 1;
    if (ImmLength == 0) {
      MI.eraseFromParent();
    if (Opcode == SystemZ::CLC) {
      if (ImmLength > 3 * 256)
    } else if (ImmLength > 6 * 256)
    LenAdjReg = LengthMO.getReg();
      (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
      MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
  TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
    DestBase = loadZeroAddress();
  if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
    SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
      (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));
  Register ThisSrcReg = MRI.createVirtualRegister(RC);
      (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
  Register NextSrcReg = MRI.createVirtualRegister(RC);
      (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
  RC = &SystemZ::GR64BitRegClass;
  Register ThisCountReg = MRI.createVirtualRegister(RC);
  Register NextCountReg = MRI.createVirtualRegister(RC);
  MBB = MemsetOneCheckMBB;
  if (EndMBB && !ImmLength)
  if (!HaveSingleBase)
  if (Opcode == SystemZ::MVC)
  if (!HaveSingleBase)
  Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register RemDestReg = HaveSingleBase
      ? RemSrcReg
      : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  if (!HaveSingleBase)
  if (Opcode != SystemZ::MVC) {
  while (ImmLength > 0) {
    foldDisplIfNeeded(DestBase, DestDisp);
    foldDisplIfNeeded(SrcBase, SrcDisp);
    insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
  if (EndMBB && ImmLength > 0) {
  MI.eraseFromParent();
  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  MI.eraseFromParent();
                                            bool NoFloat) const {
  MI.setDesc(TII->get(Opcode));
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  Control |= GPRControlBit[15];
  Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
  MI.eraseFromParent();
  Register SizeReg = MI.getOperand(2).getReg();
  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  MI.eraseFromParent();
SDValue SystemZTargetLowering::

  switch (MI.getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    return emitAdjCallStack(MI, MBB);

  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::SCmp128Hi:
    return emitICmp128Hi(MI, MBB, false);
  case SystemZ::UCmp128Hi:
    return emitICmp128Hi(MI, MBB, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0);
  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);
  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);
  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);
  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);

  case SystemZ::ATOMIC_LOADW_MIN:
  case SystemZ::ATOMIC_LOADW_MAX:
  case SystemZ::ATOMIC_LOADW_UMIN:
  case SystemZ::ATOMIC_LOADW_UMAX:

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);

  case SystemZ::MVCImm:
  case SystemZ::MVCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCImm:
  case SystemZ::XCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCImm:
  case SystemZ::CLCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::MemsetImmImm:
  case SystemZ::MemsetImmReg:
  case SystemZ::MemsetRegImm:
  case SystemZ::MemsetRegReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true);

  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);

  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);

  case SystemZ::LTEBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;

  DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);

  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  EVT OpVT = Op.getValueType();
  assert(OpVT.isVector() && "Operand type for VECREDUCE_ADD is not a vector.");
static bool isZeroVector(SDValue N)
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static bool isUndef(ArrayRef< int > Mask)
static bool isSelectPseudo(MachineInstr &MI)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the bit at the position given by "BitPosition" to 1.
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
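The demanded-elements logic earlier in the file leans on exactly these APInt operations (setBit, trunc, lshr). A minimal self-contained sketch, assuming the LLVM headers and library are available to compile and link against:

// Split an 8-lane demanded-elements mask into its low and high halves.
#include "llvm/ADT/APInt.h"
#include <cstdio>

int main() {
  llvm::APInt DemandedElts(8, 0);            // 8 elements, none demanded yet
  DemandedElts.setBit(0);                    // demand element 0
  DemandedElts.setBit(5);                    // demand element 5
  llvm::APInt Low = DemandedElts.trunc(4);   // low 4 lanes  -> 0b0001
  llvm::APInt High = DemandedElts.lshr(4).trunc(4); // high 4 lanes -> 0b0010
  std::printf("%llu %llu\n", (unsigned long long)Low.getZExtValue(),
              (unsigned long long)High.getZExtValue()); // prints "1 2"
  return 0;
}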
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back, returning the old value.
BinOp getOperation() const
StringRef getValueAsString() const
Return the attribute's value as a string.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values into this state.
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed without sret-demotion and false otherwise.
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values into this state.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string, with methods for querying it.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These methods get and set the calling convention of this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before 'Where'.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setMaxCallFrameSize(unsigned S)
MachineFunctionProperties & reset(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual register for it.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
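The BuildMI chaining used by createPHIsForSelects and the emit* helpers above follows this pattern: BuildMI creates the instruction and each add* call appends one operand. A hedged sketch; the wrapper function and its parameters are hypothetical, and in practice the context objects come from an enclosing MachineFunction pass rather than a standalone program:

// Emit "Dst = Opcode Src, Imm" before iterator I in MBB.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

using namespace llvm;

void emitRegImmOp(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                  const DebugLoc &DL, const TargetInstrInfo *TII,
                  unsigned Opcode, Register Dst, Register Src, int64_t Imm) {
  BuildMI(MBB, I, DL, TII->get(Opcode), Dst) // defines Dst
      .addReg(Src)                           // one register use
      .addImm(Imm);                          // one immediate operand
}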
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation functions.
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representation.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the scalar components.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands, and they produce a value and a token chain.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, which starts a new call frame, in which InSize bytes are set up inside the CALLSEQ_START..CALLSEQ_END pair.
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
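computeKnownBitsForTargetNode reports its result through a KnownBits value, a pair of APInts in which Zero marks the bits proven to be 0 and One marks the bits proven to be 1. A minimal sketch, assuming the LLVM headers and library are available:

// Populate an 8-bit KnownBits value by hand and query it.
#include "llvm/Support/KnownBits.h"
#include <cstdio>

int main() {
  llvm::KnownBits Known(8);
  Known.Zero.setHighBits(4); // the top four bits are known zero
  Known.One.setBit(0);       // the lowest bit is known one
  std::printf("min leading zeros: %u\n", Known.countMinLeadingZeros()); // 4
  return 0;
}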
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or truncating it.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction.
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
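Lookups such as getRegisterByName typically use StringSwitch as described here. A small self-contained example; the register names and classifications below are illustrative only:

// Map a register-name string to a description via chained literal cases.
#include "llvm/ADT/StringSwitch.h"
#include <cstdio>

int main() {
  auto Classify = [](llvm::StringRef Name) {
    return llvm::StringSwitch<const char *>(Name)
        .Case("r15", "stack pointer")
        .Case("r14", "return address")
        .Default("other");
  };
  std::printf("%s\n", Classify("r15")); // prints "stack pointer"
  return 0;
}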
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing the special-use registers particular to each calling convention.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a landing pad.
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations and not for other operations.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to use 'custom' lowering, and whose defined values are all legal.
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset, memcpy, and memmove lowering.
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fit into the return registers.
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' flag.
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail position.
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array, into the specified DAG.
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able to emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of the specified type.
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal icmp immediate, that is, the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal add immediate, that is, the target has add instruction...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers. Particular to z/OS when in 64-bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
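A target constructor typically invokes this hook once per opcode/type pair; a minimal sketch, with illustrative opcode and action choices rather than this file's actual configuration:

  // No native 32-bit signed division: expand into other nodes/libcalls.
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  // i64 population count gets target-specific custom lowering.
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);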
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
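A hedged sketch of how a backend might configure atomics with these two hooks; the widths below are illustrative assumptions:

  // Atomic operations up to 128 bits are supported natively.
  setMaxAtomicSizeInBitsSupported(128);
  // A sign-extending atomic load from i32 memory to an i64 value is legal.
  setAtomicLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);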
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
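A common pattern, shown here only as a sketch, pairs this hook with setBooleanVectorContents (listed above): scalar comparisons produce 0/1 while vector comparisons produce all-zeros/all-ones lanes:

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);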
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
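For illustration, a register class is added per legal type and then derived properties are computed once at the end; the SystemZ class names below are plausible examples, not a transcription of this file:

  addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
  addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
  // Must run after all addRegisterClass calls.
  computeRegisterProperties(Subtarget.getRegisterInfo());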
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
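A minimal sketch; the replacement symbol name is hypothetical:

  // Route memset through a platform-specific runtime entry point.
  setLibcallName(RTLIB::MEMSET, "__custom_memset");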
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
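For example, a target without a direct f64-to-f32 truncating store would mark it for expansion (illustrative, not necessarily this backend's choice):

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);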
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
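Sketch of typical usage: the nodes named here will be routed to the target's PerformDAGCombine (the particular opcodes are illustrative):

  setTargetDAGCombine({ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, ISD::FP_ROUND});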
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
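A hedged sketch with assumed capabilities: one extending-load pattern marked legal, another marked for expansion:

  // i64 <- i16 sign-extending loads map to a native instruction.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i16, Legal);
  // No native f32 -> f64 extending load on this hypothetical target.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);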
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
constexpr ScalarTy getFixedValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
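Typical usage is the default case of a switch that is believed exhaustive:

  switch (Opcode) {
  case ISD::ADD: return "add";
  case ISD::SUB: return "sub";
  default: llvm_unreachable("unexpected opcode");
  }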
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined; 0 Round to 0; 1 Round to nearest, ties to even; 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt); Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
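A small worked example of the two CondCode helpers (an integer comparison is assumed):

  ISD::CondCode CC = ISD::SETLT;
  ISD::CondCode Inv = ISD::getSetCCInverse(CC, MVT::i32);  // SETGE
  ISD::CondCode Swap = ISD::getSetCCSwappedOperands(CC);   // SETGT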
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert 'Bytes' to a hex string and output to 'OS'.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count the number of 0's from the least significant bit to the most, stopping at the first 1.
int countl_zero(T Val)
Count the number of 0's from the most significant bit to the least, stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
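Worked values for the bit-manipulation helpers above:

  int TZ = countr_zero(0x8u);    // 3: three trailing zeros in 0b1000
  int LZ = countl_zero(0x8u);    // 28 for a 32-bit operand
  unsigned C = bit_ceil(5u);     // 8, the next power of two
  bool P = isPowerOf2_32(64);    // true
  unsigned L = Log2_32_Ceil(5);  // 3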
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
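Worked example of both the compile-time-width and runtime-width forms:

  int64_t A = SignExtend64<16>(0xFFFF);  // -1
  int64_t B = SignExtend64(0x8000, 16);  // -32768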
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
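Sketch of building extended and simple EVTs (Ctx is an assumed LLVMContext):

  EVT I17 = EVT::getIntegerVT(Ctx, 17);            // extended: !I17.isSimple()
  EVT V4I32 = EVT::getVectorVT(Ctx, MVT::i32, 4);  // equivalent to MVT::v4i32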
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
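A short KnownBits sketch: start fully unknown, learn that the high half is zero, then widen:

  KnownBits Known(32);
  Known.Zero.setHighBits(16);       // bits 16..31 known to be zero
  KnownBits Wide = Known.zext(64);  // zero extension preserves known zeros
  APInt Max = Wide.getMaxValue();   // at most 0xFFFF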
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
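Sketch, with MF and FI assumed to be an existing MachineFunction and frame index:

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);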
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
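The setters above are normally chained before handing the structure to LowerCallTo; a sketch in which Chain, Callee, RetTy, Args, DL, and IsSigned are assumed to exist:

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(Chain)
      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
      .setSExtResult(IsSigned)
      .setZExtResult(!IsSigned);
  std::pair<SDValue, SDValue> Result = LowerCallTo(CLI);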