#include "llvm/IR/IntrinsicsS390.h"

#define DEBUG_TYPE "systemz-lower"
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
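// The Comparison helper above bundles everything needed to describe one
// SystemZ condition-code test: the two operands (plus an optional chain for
// strict FP compares), the opcode that performs the compare, the integer
// compare type, and the CCValid/CCMask pair encoding which of the four
// condition-code values select the "true" outcome.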
  if (Subtarget.hasHighWord())

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVectorEnhancements1())

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVector())
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {

  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE;
       ++I) {
  if (Subtarget.hasPopulationCount())

  if (!Subtarget.hasFPExtension())

  if (Subtarget.hasFPExtension())

  if (Subtarget.hasFPExtension())
                     {MVT::i8, MVT::i16, MVT::i32}, Legal);

                     {MVT::i8, MVT::i16}, Legal);
  if (!Subtarget.hasFPExtension()) {

  if (Subtarget.hasMiscellaneousExtensions3()) {

    if (VT != MVT::v2i64)

    if (Subtarget.hasVectorEnhancements1())

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVectorEnhancements2()) {
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE;
       ++I) {
    if (Subtarget.hasFPExtension()) {

  if (Subtarget.hasFPExtension()) {

  if (Subtarget.hasVector()) {

  if (Subtarget.hasVectorEnhancements1()) {

  if (Subtarget.hasVectorEnhancements1()) {
  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {

  if (!Subtarget.hasVectorEnhancements1()) {

  if (Subtarget.hasVectorEnhancements1())

  if (Subtarget.hasVectorEnhancements1()) {

  if (!Subtarget.hasVector()) {
  struct RTLibCallMapping {

  static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
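  // The X-macro above stamps out one {RTLIB enum, routine name} pair per
  // HANDLE_LIBCALL line in ZOSLibcallNames.def; the loop below installs each
  // mapping as the libcall name used when targeting z/OS.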
  for (auto &E : RTLibCallCommon)

  return Subtarget.hasSoftFloat();
  return Subtarget.hasVectorEnhancements1();

  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))

  if (SplatBitSize > 64)

  if (isInt<16>(SignedValue)) {
  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);

  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
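// Undef bits give the splat matcher freedom: leading (Upper) and trailing
// (Lower) undef runs are first assumed to be 1s, which raises the odds of
// matching a sign-extended immediate for VECTOR REPLICATE IMMEDIATE or a
// wraparound mask for VECTOR GENERATE MASK; the Middle fallback also sets
// the interior undef bits before retrying.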
  unsigned HalfSize = Width / 2;

  if (HighValue != LowValue || 8 > HalfSize)

  SplatBits = HighValue;

  SplatBitSize = Width;
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,

  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,

                                       bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())

  if (Subtarget.hasInterlockedAccess1() &&

  return isInt<32>(Imm) || isUInt<32>(Imm);

  return isUInt<32>(Imm) || isUInt<32>(-Imm);
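// Immediate legality on SystemZ: compares accept any value expressible as a
// 32-bit signed or unsigned immediate (the CGFI/CLGFI forms), while add
// immediates accept a 32-bit unsigned value or its negation (ALGFI/SLGFI).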
      LongDisplacement(LongDispl), IndexReg(IdxReg) {}

  switch (II->getIntrinsicID()) {
  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {

    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());

    bool IsVectorAccess = MemAccessTy->isVectorTy();

    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;

    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;

    if (IsFPAccess || IsVectorAccess)
  return AM.Scale == 0;
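// The checks above feed supportedAddressingMode(): accesses that will be
// selected as MVC-style storage-to-storage operations support neither a long
// displacement nor an index register, and FP/vector accesses only have the
// short-displacement instruction forms, so the addressing-mode hooks steer
// loop strength reduction toward plain base + small-displacement addresses
// in those cases.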
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;

  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())

                                       SrcAS, FuncAttributes);
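// MVCFastLen caps the sizes that are left to the storage-to-storage
// expansions rather than scalar loads and stores: a short memcpy becomes a
// single MVC, and a short memset stores its first byte and MVC-copies the
// remainder (hence the "size() - 1" test), while zeroing memsets use XC.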
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;

  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())

  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();

  return FromBits > ToBits;

  return FromBits > ToBits;
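// isTruncateFree, in both its IR-type and EVT forms: on SystemZ an integer
// truncation is free whenever the source is wider than the destination,
// because it is simply a use of the low bits of the same register.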
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {

                              const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (Subtarget.hasVector())

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))

    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
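    // The immediate constraint letters map directly onto SystemZ instruction
    // formats: 'I' is an unsigned 8-bit value, 'J' unsigned 12-bit, 'K'
    // signed 16-bit, 'L' signed 20-bit, and 'M' the single constant
    // 0x7fffffff.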
static std::pair<unsigned, const TargetRegisterClass *>
parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
      return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
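// Parses an explicit register reference of the form {rN}/{fN}/{vN} in an
// inline-asm constraint string, mapping the parsed number through Map (the
// register class's DAG ordering) and returning {0, nullptr} on failure.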
std::pair<unsigned, const TargetRegisterClass *>

  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
      return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);

      return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);

      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);

      return std::make_pair(0U, &SystemZ::FP64BitRegClass);
      return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);

      if (Subtarget.hasVector()) {
        return std::make_pair(0U, &SystemZ::VR32BitRegClass);
        return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
  auto getVTSizeInBits = [&VT]() {

    if (Constraint[1] == 'r') {
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'f') {
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)

    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 64)
                                 const Constant *PersonalityFn) const {

                                 const Constant *PersonalityFn) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                                          Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                                          Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
                                          Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
                                          Op.getValueType()));

      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                                          Op.getValueType()));
#include "SystemZGenCallingConv.inc"

  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,

                                           Type *ToType) const {
  if (BitCastToType == MVT::v2i64)

                     MVT::Untyped, Hi, Lo);

    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {

    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
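// Both splitValueIntoRegisterParts and joinRegisterPartsIntoValue special-
// case a 128-bit value carried as a single MVT::Untyped part: rather than
// being split into two i64 pieces, the value travels in a GR128 even/odd
// register pair, converted through this file's i128<->GR128 lowering
// helpers.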
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,

      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;
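        // Consecutive Ins[] entries that share an OrigArgIndex are the
        // pieces of one argument that the calling convention split; each
        // extra part is reloaded from its PartOffset within the same spill
        // slot so the original value can be reassembled.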
    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();

    int64_t RegSaveOffset =

                 &SystemZ::FP64BitRegClass);

    MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)

    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
                       unsigned Offset, bool LoadAdr = false) {

  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
  unsigned ADADelta = 0;
  unsigned EPADelta = 8;

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());

  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {

    unsigned ArgIndex = Outs[I].OrigArgIndex;

    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;

        SlotVT = Outs[I].VT;

      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();

      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;

                SlotVT.getStoreSize()) && "Not enough space for argument part!");

      ArgValue = SpillSlot;
      if (!StackPtr.getNode())

    RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));

  if (!MemOpChains.empty())

        ->getAddressOfCalleeRegister();

    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());

  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {

  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {

  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);

  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));

  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");

  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);

                                 VA.getLocVT(), Glue);
                              bool DoesNotReturn, bool IsReturnValueUsed) const {

  Args.reserve(Ops.size());

    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());

    Args.push_back(Entry);
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)

  CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);

  if (RetLocs.empty())

  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                           unsigned &CCValid) {
  unsigned Id = Op.getConstantOperandVal(1);

  case Intrinsic::s390_tbegin:

  case Intrinsic::s390_tbegin_nofloat:

  case Intrinsic::s390_tend:

  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:

  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:

  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:

  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:

  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:

  case Intrinsic::s390_vtm:

  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:

  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:

  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:

  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:

  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:

  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:

  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:

  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:

  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:

  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:

  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:

  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:

  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:

  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:

  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:

  case Intrinsic::s390_tdc:
  for (unsigned I = 2; I < NumOps; ++I)

  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

  return Intr.getNode();

  for (unsigned I = 1; I < NumOps; ++I)

  return Intr.getNode();
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
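// The macro expands each relation three ways: the plain and ordered forms
// (SETLT/SETOLT) map to the bare CCMASK_CMP_* bit, while the unordered form
// (SETULT) additionally accepts the "unordered" condition code so that NaN
// operands satisfy the compare.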
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  int64_t Value = ConstOp1->getSExtValue();

  if (!C.Op0.hasOneUse() ||

  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())

  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)

  uint64_t Mask = (1 << NumBits) - 1;

  int64_t SignedValue = ConstOp1->getSExtValue();

  } else if (NumBits == 8) {
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                       Load->getBasePtr(), Load->getPointerInfo(),
                       Load->getMemoryVT(), Load->getAlign(),
                       Load->getMemOperand()->getFlags());

  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())

  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load->getMemoryVT() == MVT::i8)
  switch (Load->getExtensionType()) {
  if (C.Op0.getValueType() == MVT::i128)
  if (C.Op0.getValueType() == MVT::f128)

  if (isa<ConstantFPSDNode>(C.Op1))

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)

      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))
  unsigned Opcode0 = C.Op0.getOpcode();

      C.Op0.getConstantOperandVal(1) == 0xffffffff)

      ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
       (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {

      Flags.setNoSignedWrap(false);
      Flags.setNoUnsignedWrap(false);
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {

  if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
          cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
        C.Op0 = C.Op0.getOperand(0);

  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));

  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
                               unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");

  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {

  if (CmpVal == Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {

  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
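// Each range test above classifies (Mask, CmpVal) into a CC mask that TEST
// UNDER MASK can produce directly: TM sets CC 0 when all selected bits are
// zero, CC 3 when all are one, and CC 1/2 for mixed results keyed on the
// leftmost selected bit, so ordered compares against the mask boundaries can
// be rewritten as TM condition masks.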
  if (C.Op0.getValueType() == MVT::i128) {

    auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
    if (Mask && Mask->getAPIntValue() == 0) {

  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);

  uint64_t CmpVal = ConstOp1->getZExtValue();

    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
      MaskVal = Mask->getZExtValue();

  if (NewC.Op0.getValueType() != MVT::i64 ||

      MaskVal = -(CmpVal & -CmpVal);

  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                        MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
      NewC.Op0.getOpcode() == ISD::SRL &&
      (MaskVal << ShiftVal != 0) &&
      ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                        MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;

  if (Mask && Mask->getZExtValue() == MaskVal)

  C.CCMask = NewCCMask;
  if (C.Op0.getValueType() != MVT::i128)

  bool Swap = false, Invert = false;

    C.CCMask ^= C.CCValid;

  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask || Mask->getValueSizeInBits(0) > 64)

  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())

  C.Op0 = C.Op0.getOperand(0);
  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
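// CC masks are 4-bit sets indexed MSB-first, so condition code N owns bit
// (3 - N): a test for "CC == 2" becomes 1 << (3 - 2) = 0b0010. The shifted
// ~0U forms select whole runs of condition codes for the ordered relations,
// and the final &= CCValid drops codes the compare can never produce.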
                          bool IsSignaling = false) {

  unsigned Opcode, CCValid;

  Comparison C(CmpOp0, CmpOp1, Chain);

  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)

    C.CCMask &= ~SystemZ::CCMASK_CMP_UO;

  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {

    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);

                          unsigned CCValid, unsigned CCMask) {

    case CmpMode::Int: return 0;
    case CmpMode::FP: return 0;
    case CmpMode::StrictFP: return 0;
    case CmpMode::SignalingFP: return 0;
  int Mask[] = { Start, -1, Start + 1, -1 };

      !Subtarget.hasVectorEnhancements1()) {

  SDValue Ops[2] = { Res, NewChain };

    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
                          bool IsSignaling) const {
  assert (!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;

      assert(IsFP && "Unexpected integer comparison");
                        DL, VT, CmpOp1, CmpOp0, Chain);
                        DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GE.getValue(1));

      assert(IsFP && "Unexpected integer comparison");
                        DL, VT, CmpOp1, CmpOp0, Chain);
                        DL, VT, CmpOp0, CmpOp1, Chain);
                            LT.getValue(1), GT.getValue(1));

      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);

    Chain = Cmp.getValue(1);
  if (Chain && Chain.getNode() != Cmp.getNode()) {

  EVT VT = Op.getValueType();

    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);

                          bool IsSignaling) const {

  EVT VT = Op.getNode()->getValueType(0);

  SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                 Chain, IsSignaling);
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {

  SDValue Ops[] = {TrueOp, FalseOp,

  Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);

                     Node->getValueType(0),

  assert(Mask && "Missing call preserved mask for calling convention");

  Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,

  SDValue TP = lowerThreadPointer(DL, DAG);

  if (CP->isMachineConstantPoolEntry())

  unsigned Depth = Op.getConstantOperandVal(0);

  int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);

  unsigned Depth = Op.getConstantOperandVal(0);

  SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);

  int Offset = TFL->getReturnAddressOffset(MF);

               &SystemZ::GR64BitRegClass);

  EVT InVT = In.getValueType();
  EVT ResVT = Op.getValueType();

  if (auto *LoadN = dyn_cast<LoadSDNode>(In))
                       LoadN->getBasePtr(), LoadN->getMemOperand());
  if (InVT == MVT::i32 && ResVT == MVT::f32) {
    if (Subtarget.hasHighWord()) {
                                 MVT::i64, SDValue(U64, 0), In);
                      DL, MVT::f32, Out64);

  if (InVT == MVT::f32 && ResVT == MVT::i32) {
                                 MVT::f64, SDValue(U64, 0), In);
    if (Subtarget.hasHighWord())

    return lowerVASTART_XPLINK(Op, DAG);
  return lowerVASTART_ELF(Op, DAG);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  const unsigned NumFields = 4;

  for (unsigned I = 0; I < NumFields; ++I) {
    MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,

  const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

                       Align(8), false, false,
SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(SDValue Op,
    return lowerDYNAMIC_STACKALLOC_XPLINK(Op, DAG);
  return lowerDYNAMIC_STACKALLOC_ELF(Op, DAG);

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(SDValue Op,

  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

  if (ExtraAlignSpace)

  bool IsSigned = false;
  bool DoesNotReturn = false;
  bool IsReturnValueUsed = false;
  EVT VT = Op.getValueType();

  Register SPReg = Regs.getStackPointerRegister();
  if (ExtraAlignSpace) {

SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(SDValue Op,

  uint64_t AlignVal = (RealignOpt ? Align->getAsZExtVal() : 0);

  uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
  uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;

    Backchain = DAG.getLoad(MVT::i64, DL, Chain, getBackchainAddress(OldSP, DAG),

  if (ExtraAlignSpace)

      DAG.getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);

  if (RequiredAlign > StackAlign) {

    Chain = DAG.getStore(Chain, DL, Backchain, getBackchainAddress(NewSP, DAG),
SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(

  EVT VT = Op.getValueType();

             Op.getOperand(1), Ops[1], Ops[0]);
  else if (Subtarget.hasMiscellaneousExtensions2())
             Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

             LL, RL, Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

             Op.getOperand(1), Ops[1], Ops[0]);
             Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  EVT VT = Op.getValueType();

  EVT VT = Op.getValueType();

             Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);

  assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");

  SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};

  if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
  else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)

  if (!isInt<16>(Value))

                     MVT::i64, HighOp, Low32);
  if (N->getValueType(0) == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;
  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)

  MVT VT = N->getSimpleValueType(0);

  if (VT == MVT::i128) {
    unsigned BaseOp = 0;
    unsigned FlagOp = 0;
    bool IsBorrow = false;
    switch (Op.getOpcode()) {

  unsigned BaseOp = 0;
  unsigned CCValid = 0;
  unsigned CCMask = 0;
  switch (Op.getOpcode()) {

  if (N->getValueType(1) == MVT::i1)
  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);

  if (NumSignificantBits == 0)

  BitSize = std::min(BitSize, OrigBitSize);

  for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {

  if (BitSize != OrigBitSize)

  auto *Node = cast<AtomicSDNode>(Op.getNode());
  assert((Node->getMemoryVT() == MVT::i128 ||
          Node->getMemoryVT() == MVT::f128) &&
         "Only custom lowering i128 or f128.");
  EVT PtrVT = Addr.getValueType();
  EVT WideVT = MVT::i32;

                                              unsigned Opcode) const {
  auto *Node = cast<AtomicSDNode>(Op.getNode());

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = MVT::i32;
  if (NarrowVT == WideVT)

  if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
  SDValue AlignedAddr, BitShift, NegBitShift;

  SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,

  auto *Node = cast<AtomicSDNode>(Op.getNode());
  EVT MemVT = Node->getMemoryVT();
  if (MemVT == MVT::i32 || MemVT == MVT::i64) {

    assert(Op.getValueType() == MemVT && "Mismatched VTs");
    assert(Subtarget.hasInterlockedAccess1() &&
           "Should have been expanded by AtomicExpand pass.");
                 Node->getChain(), Node->getBasePtr(), NegSrc2,
                 Node->getMemOperand());

  auto *Node = cast<AtomicSDNode>(Op.getNode());

  if (Node->getMemoryVT() == MVT::i128) {

  EVT NarrowVT = Node->getMemoryVT();
  EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
  if (NarrowVT == WideVT) {
    SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
                                DL, Tys, Ops, NarrowVT, MMO);

  SDValue AlignedAddr, BitShift, NegBitShift;
  SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
                                VTList, Ops, NarrowVT, MMO);
SystemZTargetLowering::getTargetMMOFlags(const Instruction &I) const {

  if (auto *SI = dyn_cast<StoreInst>(&I))
  if (auto *LI = dyn_cast<LoadInst>(&I))
  if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))

         "in GHC calling convention");
                         Regs->getStackPointerRegister(), Op.getValueType());
4771 "in GHC calling convention");
4778 if (StoreBackchain) {
4780 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
4781 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4785 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
4788 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4796 bool IsData =
Op.getConstantOperandVal(4);
4799 return Op.getOperand(0);
4802 bool IsWrite =
Op.getConstantOperandVal(2);
4804 auto *
Node = cast<MemIntrinsicSDNode>(
Op.getNode());
4808 Node->getVTList(), Ops,
4809 Node->getMemoryVT(),
Node->getMemOperand());
SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,

  unsigned Opcode, CCValid;
    assert(Op->getNumValues() == 2 && "Expected only CC result and chain");

SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,

  unsigned Opcode, CCValid;
    if (Op->getNumValues() == 1)
    assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");

  unsigned Id = Op.getConstantOperandVal(0);

  case Intrinsic::thread_pointer:
    return lowerThreadPointer(SDLoc(Op), DAG);
  case Intrinsic::s390_vpdi:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vperm:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:

  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:

  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:

  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:

  case Intrinsic::s390_vsumb:
  case Intrinsic::s390_vsumh:
  case Intrinsic::s390_vsumgh:
  case Intrinsic::s390_vsumgf:
  case Intrinsic::s390_vsumqf:
  case Intrinsic::s390_vsumqg:
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::s390_vaq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vaccb:
  case Intrinsic::s390_vacch:
  case Intrinsic::s390_vaccf:
  case Intrinsic::s390_vaccg:
  case Intrinsic::s390_vaccq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vacq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vacccq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::s390_vsq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vscbib:
  case Intrinsic::s390_vscbih:
  case Intrinsic::s390_vscbif:
  case Intrinsic::s390_vscbig:
  case Intrinsic::s390_vscbiq:
                       Op.getOperand(1), Op.getOperand(2));
  case Intrinsic::s390_vsbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
  case Intrinsic::s390_vsbcbiq:
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
    { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
    { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
    { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
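// Each row above is the byte-selection pattern of one fixed permute
// instruction (the merge-high/merge-low and pack family at the various
// element sizes): indices 0-15 pick bytes of the first source operand and
// 16-31 bytes of the second, matching VPERM's operand model, so a shuffle
// is matched by comparing its byte pattern against these tables.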
    OpNo0 = OpNo1 = OpNos[1];
  } else if (OpNos[1] < 0) {
    OpNo0 = OpNo1 = OpNos[0];

                         unsigned &OpNo0, unsigned &OpNo1) {
  int OpNos[] = { -1, -1 };
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;
                         unsigned &OpNo0, unsigned &OpNo1) {

    int Elt = Bytes[From];
      Transform[From] = -1;
      while (P.Bytes[To] != Elt) {
      Transform[From] = To;

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I) {
      int Index = VSN->getMaskElt(I);
        for (unsigned J = 0; J < BytesPerElement; ++J)
          Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;

      isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
    Bytes.resize(NumElements * BytesPerElement, -1);
    for (unsigned I = 0; I < NumElements; ++I)
      for (unsigned J = 0; J < BytesPerElement; ++J)
        Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
                            unsigned BytesPerElement, int &Base) {

  for (unsigned I = 0; I < BytesPerElement; ++I) {
    if (Bytes[Start + I] >= 0) {
      unsigned Elem = Bytes[Start + I];
      if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
    } else if (unsigned(Base) != Elem - I)
                            unsigned &StartIndex, unsigned &OpNo0,

  int OpNos[] = { -1, -1 };

  for (unsigned I = 0; I < 16; ++I) {
        Shift = ExpectedShift;
      else if (Shift != ExpectedShift)
      if (OpNos[ModelOpNo] == 1 - RealOpNo)
      OpNos[ModelOpNo] = RealOpNo;
    N = N->getOperand(0);

  if (auto *Op = dyn_cast<ConstantSDNode>(N->getOperand(0)))
    return Op->getZExtValue() == 0;
  for (unsigned I = 0; I < Num; I++)

  for (unsigned I = 0; I < 2; ++I)

  unsigned StartIndex, OpNo0, OpNo1;

  if (ZeroVecIdx != UINT32_MAX) {
    bool MaskFirst = true;
      if (OpNo == ZeroVecIdx && I == 0) {
      if (OpNo != ZeroVecIdx && Byte == 0) {
    if (ZeroIdx != -1) {
      if (Bytes[I] >= 0) {
        if (OpNo == ZeroVecIdx)

  SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];

                    (!Ops[1].isUndef() ? Ops[1] : Ops[0]), Op2);
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}

  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }

  unsigned UnpackFromEltSize;
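// GeneralShuffle models an arbitrary shuffle as a flat list of byte sources
// (Bytes), one entry per result byte (-1 for undef), over a small set of
// source operands (Ops); getNode() then covers that byte pattern with the
// fixed permute forms above, falling back to a full VPERM when nothing
// cheaper matches.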
void GeneralShuffle::addUndef() {
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {

  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;

  if (FromBytesPerElement < BytesPerElement)
          (FromBytesPerElement - BytesPerElement));

  while (Op.getNode()) {
      Op = Op.getOperand(0);

    } else if (Op.isUndef()) {

    for (; OpNo < Ops.size(); ++OpNo)
      if (Ops[OpNo] == Op)
    if (OpNo == Ops.size())

  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);
  if (Ops.size() == 0)

  tryPrepareForUnpack();

  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));

  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };

        else if (OpNo == I + Stride)

        if (NewBytes[J] >= 0) {
                 "Invalid double permute");
          assert(NewBytesMap[J] < 0 && "Invalid double permute");
          if (NewBytes[J] >= 0)

    Ops[1] = Ops[Stride];
  unsigned OpNo0, OpNo1;

  if (unpackWasPrepared() && Ops[1].isUndef())
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))

  Op = insertUnpackIfPrepared(DAG, DL, Op);
  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";
void GeneralShuffle::tryPrepareForUnpack() {

  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)

  if (Ops.size() > 2 &&

  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;
      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (Bytes[Elt] != -1) {
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;
    if (Ops.size() == 2) {
        if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
          UnpackFromEltSize = UINT_MAX;

  if (UnpackFromEltSize > 4)

  LLVM_DEBUG(dbgs() << "Preparing for final unpack of element size "
                    << UnpackFromEltSize << ". Zero vector is Op#" << ZeroVecOpNo
             dumpBytes(Bytes, "Original Bytes vector:"););

    Elt += UnpackFromEltSize;
    for (unsigned i = 0; i < UnpackFromEltSize; i++, Elt++, B++)
      Bytes[B] = Bytes[Elt];

  Ops.erase(&Ops[ZeroVecOpNo]);
    if (Bytes[I] >= 0) {
      if (OpNo > ZeroVecOpNo)
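// If one operand is an all-zeros vector and every byte taken from it lands
// in the zero half of a widened element, the shuffle can be performed on
// the narrow elements and finished with a single zero-extending unpack; the
// scan above detects that shape, and the rewrite removes the zero vector
// from Ops and compacts Bytes to the narrow layout.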
  if (!unpackWasPrepared())
  unsigned InBits = UnpackFromEltSize * 8;
  unsigned OutBits = InBits * 2;

  if (!Op.getOperand(I).isUndef())

  if (Value.isUndef())
  GeneralShuffle GS(VT);

  bool FoundOne = false;
  for (unsigned I = 0; I < NumElements; ++I) {
      Op = Op.getOperand(0);
      unsigned Elem = Op.getConstantOperandVal(1);
      if (!GS.add(Op.getOperand(0), Elem))
    } else if (Op.isUndef()) {

  if (!ResidueOps.empty()) {
    while (ResidueOps.size() < NumElements)
    for (auto &Op : GS.Ops) {
      if (!Op.getNode()) {

  return GS.getNode(DAG, SDLoc(BVN));
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
  if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
  if (auto *AL = dyn_cast<AtomicSDNode>(Op))
  unsigned int NumElements = Elems.size();
  unsigned int Count = 0;
  for (auto Elem : Elems) {
    if (!Elem.isUndef()) {
      else if (Elem != Single) {

  if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))

  bool AllLoads = true;
  for (auto Elem : Elems)
    if (!isVectorElementLoad(Elem)) {

  if (VT == MVT::v2i64 && !AllLoads)

  if (VT == MVT::v2f64 && !AllLoads)

  if (VT == MVT::v4f32 && !AllLoads) {
                     DL, MVT::v2i64, Op01, Op23);

  unsigned NumConstants = 0;
  for (unsigned I = 0; I < NumElements; ++I) {
  if (NumConstants > 0) {
    for (unsigned I = 0; I < NumElements; ++I)

  std::map<const SDNode*, unsigned> UseCounts;
  SDNode *LoadMaxUses = nullptr;
  for (unsigned I = 0; I < NumElements; ++I)
    if (isVectorElementLoad(Elems[I])) {
      SDNode *Ld = Elems[I].getNode();
      if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
  if (LoadMaxUses != nullptr) {
    ReplicatedVal = SDValue(LoadMaxUses, 0);
  unsigned I1 = NumElements / 2 - 1;
  unsigned I2 = NumElements - 1;
  bool Def1 = !Elems[I1].isUndef();
  bool Def2 = !Elems[I2].isUndef();

  for (unsigned I = 0; I < NumElements; ++I)
    if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
  auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
  EVT VT = Op.getValueType();

  if (BVN->isConstant()) {

  for (unsigned I = 0; I < NumElements; ++I)
    Ops[I] = Op.getOperand(I);
  return buildVector(DAG, DL, VT, Ops);
  auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
  EVT VT = Op.getValueType();

  if (VSN->isSplat()) {
    unsigned Index = VSN->getSplatIndex();
           "Splat index should be defined and in first operand");

  GeneralShuffle GS(VT);
  for (unsigned I = 0; I < NumElements; ++I) {
    int Elt = VSN->getMaskElt(I);
    else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
                     unsigned(Elt) % NumElements))

  return GS.getNode(DAG, SDLoc(VSN));
  EVT VT = Op.getValueType();

  if (VT == MVT::v2f64 &&

SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,

  EVT VT = Op.getValueType();

  if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
SDValue SystemZTargetLowering::

  EVT OutVT = Op.getValueType();
  } while (FromBits != ToBits);

SDValue SystemZTargetLowering::

  EVT OutVT = Op.getValueType();

  unsigned NumInPerOut = InNumElts / OutNumElts;

  unsigned ZeroVecElt = InNumElts;
  for (unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
    unsigned MaskElt = PackedElt * NumInPerOut;
    unsigned End = MaskElt + NumInPerOut - 1;
    for (; MaskElt < End; MaskElt++)
      Mask[MaskElt] = ZeroVecElt++;
    Mask[MaskElt] = PackedElt;
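    // The mask built above implements zero-extension as a shuffle: for each
    // packed result element, every leading slot is taken from a zero vector
    // (indices >= InNumElts) and only the final slot receives the real
    // source element, so the whole operation lowers to one byte permute.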
                                      unsigned ByScalar) const {

  EVT VT = Op.getValueType();

  if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
    APInt SplatBits, SplatUndef;
    unsigned SplatBitSize;

    if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
                             ElemBitSize, true) &&
        SplatBitSize == ElemBitSize) {
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);

  if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
    if (VSN->isSplat()) {
      unsigned Index = VSN->getSplatIndex();
             "Splat index should be defined and in first operand");
      return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
  MVT DstVT = Op.getSimpleValueType();

  unsigned SrcAS = N->getSrcAddressSpace();

  assert(SrcAS != N->getDestAddressSpace() &&
         "addrspacecast must be between different address spaces");

  } else if (DstVT == MVT::i32) {

  MVT ResultVT = Op.getSimpleValueType();

  unsigned Check = Op.getConstantOperandVal(1);

  unsigned TDCMask = 0;

  int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

  return DAG.getLoad(MVT::i64, DL, Chain, StackPtr, MPI);
  switch (Op.getOpcode()) {
    return lowerFRAMEADDR(Op, DAG);
    return lowerRETURNADDR(Op, DAG);
    return lowerBR_CC(Op, DAG);
    return lowerSELECT_CC(Op, DAG);
    return lowerSETCC(Op, DAG);
    return lowerSTRICT_FSETCC(Op, DAG, false);
    return lowerSTRICT_FSETCC(Op, DAG, true);
    return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
    return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
    return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
    return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
    return lowerBITCAST(Op, DAG);
    return lowerVASTART(Op, DAG);
    return lowerVACOPY(Op, DAG);
    return lowerDYNAMIC_STACKALLOC(Op, DAG);
    return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
    return lowerSMUL_LOHI(Op, DAG);
    return lowerUMUL_LOHI(Op, DAG);
    return lowerSDIVREM(Op, DAG);
    return lowerUDIVREM(Op, DAG);
    return lowerXALUO(Op, DAG);
    return lowerUADDSUBO_CARRY(Op, DAG);
    return lowerOR(Op, DAG);
    return lowerCTPOP(Op, DAG);
    return lowerVECREDUCE_ADD(Op, DAG);
    return lowerATOMIC_FENCE(Op, DAG);
    return lowerATOMIC_LDST_I128(Op, DAG);
    return lowerATOMIC_LOAD_SUB(Op, DAG);
    return lowerATOMIC_CMP_SWAP(Op, DAG);
    return lowerSTACKSAVE(Op, DAG);
    return lowerSTACKRESTORE(Op, DAG);
    return lowerPREFETCH(Op, DAG);
    return lowerINTRINSIC_W_CHAIN(Op, DAG);
    return lowerINTRINSIC_WO_CHAIN(Op, DAG);
    return lowerBUILD_VECTOR(Op, DAG);
    return lowerVECTOR_SHUFFLE(Op, DAG);
    return lowerSCALAR_TO_VECTOR(Op, DAG);
    return lowerINSERT_VECTOR_ELT(Op, DAG);
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
    return lowerSIGN_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerZERO_EXTEND_VECTOR_INREG(Op, DAG);
    return lowerIS_FPCLASS(Op, DAG);
    return lowerGET_ROUNDING(Op, DAG);
    return lowerREADCYCLECOUNTER(Op, DAG);
6292 &SystemZ::FP128BitRegClass);
6301 SystemZ::REG_SEQUENCE, SL, MVT::f128,
6316 &SystemZ::FP128BitRegClass);
6334 switch (
N->getOpcode()) {
6338 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1) };
6341 DL, Tys, Ops, MVT::i128, MMO);
6344 if (
N->getValueType(0) == MVT::f128)
6358 SDValue Ops[] = {
N->getOperand(0), Val,
N->getOperand(2)};
6361 DL, Tys, Ops, MVT::i128, MMO);
6364 if (cast<AtomicSDNode>(
N)->getSuccessOrdering() ==
6367 MVT::Other, Res), 0);
6374 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1),
6379 DL, Tys, Ops, MVT::i128, MMO);
6390 if (
N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
    OPCODE(ATOMIC_LOADW_ADD);
    OPCODE(ATOMIC_LOADW_SUB);
    OPCODE(ATOMIC_LOADW_AND);
    OPCODE(ATOMIC_LOADW_XOR);
    OPCODE(ATOMIC_LOADW_NAND);
    OPCODE(ATOMIC_LOADW_MIN);
    OPCODE(ATOMIC_LOADW_MAX);
    OPCODE(ATOMIC_LOADW_UMIN);
    OPCODE(ATOMIC_LOADW_UMAX);
    OPCODE(ATOMIC_CMP_SWAPW);
    OPCODE(ATOMIC_STORE_128);
    OPCODE(ATOMIC_CMP_SWAP_128);
bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
  if (!Subtarget.hasVector())
                                      DAGCombinerInfo &DCI,

  unsigned Opcode = Op.getOpcode();
    Op = Op.getOperand(0);
        canTreatAsByteVector(Op.getValueType())) {
                          BytesPerElement, First))
      if (Byte % BytesPerElement != 0)
      Index = Byte / BytesPerElement;
        canTreatAsByteVector(Op.getValueType())) {
      EVT OpVT = Op.getValueType();
      if (OpBytesPerElement < BytesPerElement)
      unsigned End = (Index + 1) * BytesPerElement;
      if (End % OpBytesPerElement != 0)
      Op = Op.getOperand(End / OpBytesPerElement - 1);
      if (!Op.getValueType().isInteger()) {
        DCI.AddToWorklist(Op.getNode());
        DCI.AddToWorklist(Op.getNode());
               canTreatAsByteVector(Op.getValueType()) &&
               canTreatAsByteVector(Op.getOperand(0).getValueType())) {
      EVT ExtVT = Op.getValueType();
      EVT OpVT = Op.getOperand(0).getValueType();
      unsigned Byte = Index * BytesPerElement;
      unsigned SubByte = Byte % ExtBytesPerElement;
      unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
      if (SubByte < MinSubByte ||
          SubByte + BytesPerElement > ExtBytesPerElement)
      Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
      Byte += SubByte - MinSubByte;
      if (Byte % BytesPerElement != 0)
      Op = Op.getOperand(0);

  if (Op.getValueType() != VecVT) {
    DCI.AddToWorklist(Op.getNode());
SDValue SystemZTargetLowering::combineTruncateExtract(

  if (canTreatAsByteVector(VecVT)) {
    if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      if (BytesPerElement % TruncBytes == 0) {
        unsigned Scale = BytesPerElement / TruncBytes;
        unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
        EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
        return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
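        // (truncate (extract_vector_elt X, Y)) is rewritten as an extract
        // of the narrower subelement that actually holds the truncated
        // value: the index is scaled by BytesPerElement / TruncBytes and
        // the last (least significant, big-endian) subelement is taken, so
        // the truncation itself becomes free.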
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);
    auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
    auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (TrueOp && FalseOp) {
      DCI.CombineTo(N0.getNode(), TruncSelect);
SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);
  EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
SDValue SystemZTargetLowering::combineSIGN_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT VT = N->getValueType(0);
  auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
      unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
      unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
SDValue SystemZTargetLowering::combineMERGE(
    SDNode *N, DAGCombinerInfo &DCI) const {

  unsigned Opcode = N->getOpcode();
  if (Op1 == N->getOperand(0))
    if (ElemBytes <= 4) {
      DCI.AddToWorklist(Op1.getNode());
      DCI.AddToWorklist(Op.getNode());

  LoPart = HiPart = nullptr;
       UI != UIEnd; ++UI) {
    if (UI.getUse().getResNo() != 0)
    bool IsLoPart = true;

  LoPart = HiPart = nullptr;
       UI != UIEnd; ++UI) {
    if (UI.getUse().getResNo() != 0)
        User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
    switch (User->getConstantOperandVal(1)) {
    case SystemZ::subreg_l64:
    case SystemZ::subreg_h64:
SDValue SystemZTargetLowering::combineLOAD(
    SDNode *N, DAGCombinerInfo &DCI) const {

  EVT LdVT = N->getValueType(0);
  if (auto *LN = dyn_cast<LoadSDNode>(N)) {
    MVT LoadNodeVT = LN->getBasePtr().getSimpleValueType();
    if (PtrVT != LoadNodeVT) {
      return DAG.getExtLoad(LN->getExtensionType(), DL, LN->getValueType(0),
                            LN->getChain(), AddrSpaceCast, LN->getMemoryVT(),
                            LN->getMemOperand());

                   LD->getPointerInfo(), LD->getOriginalAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(HiPart, EltLoad, true);

                   LD->getPointerInfo().getWithOffset(8), LD->getOriginalAlign(),
                   LD->getMemOperand()->getFlags(), LD->getAAInfo());
    DCI.CombineTo(LoPart, EltLoad, true);

    DCI.AddToWorklist(Chain.getNode());
    else if (UI.getUse().getResNo() == 0)

  if (!Replicate || OtherUses.empty())

  for (SDNode *U : OtherUses) {
bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
  if (Subtarget.hasVectorEnhancements2())
    if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 ||
        VT == MVT::i128)

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0) continue;
    if ((unsigned) M[i] != NumElts - 1 - i)

  for (auto *U : StoredVal->uses()) {
      EVT CurrMemVT = ST->getMemoryVT().getScalarType();
    } else if (isa<BuildVectorSDNode>(U)) {
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {

  auto *SN = cast<StoreSDNode>(N);
  auto &Op1 = N->getOperand(1);
  EVT MemVT = SN->getMemoryVT();

  MVT StoreNodeVT = SN->getBasePtr().getSimpleValueType();
  if (PtrVT != StoreNodeVT) {
    return DAG.getStore(SN->getChain(), DL, SN->getValue(), AddrSpaceCast,
                        SN->getPointerInfo(), SN->getOriginalAlign(),
                        SN->getMemOperand()->getFlags(), SN->getAAInfo());

  if (MemVT.isInteger() && SN->isTruncatingStore()) {
            combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
      DCI.AddToWorklist(Value.getNode());
                               SN->getBasePtr(), SN->getMemoryVT(),
                               SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      N->getOperand(0), BSwapOp, N->getOperand(2)
                                 Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      Subtarget.hasVectorEnhancements2()) {
                                 Ops, MemVT, SN->getMemOperand());

  if (!SN->isTruncatingStore() &&
      N->getOperand(0).reachesChainWithoutSideEffects(SDValue(Op1.getNode(), 1))) {
                                 Ops, MemVT, SN->getMemOperand());

      DAG.getStore(SN->getChain(), DL, HiPart, SN->getBasePtr(),
                   SN->getPointerInfo(), SN->getOriginalAlign(),
                   SN->getMemOperand()->getFlags(), SN->getAAInfo());
                   SN->getPointerInfo().getWithOffset(8),
                   SN->getOriginalAlign(),
                   SN->getMemOperand()->getFlags(), SN->getAAInfo());
  if (C->getAPIntValue().getBitWidth() > 64 || C->isAllOnes() ||

  if (VCI.isVectorConstantLegal(Subtarget) &&

  auto FindReplicatedReg = [&](SDValue MulOp) {
    EVT MulVT = MulOp.getValueType();
    if (MulOp->getOpcode() == ISD::MUL &&
        (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
        WordVT = LHS->getOperand(0).getValueType();
        WordVT = cast<VTSDNode>(LHS->getOperand(1))->getVT();
      if (auto *C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
            APInt(MulVT.getSizeInBits(), C->getZExtValue()));
        if (VCI.isVectorConstantLegal(Subtarget) &&
            WordVT == VCI.VecVT.getScalarType())

  if (isa<BuildVectorSDNode>(Op1) &&
    if (auto *C = dyn_cast<ConstantSDNode>(SplatVal))
      FindReplicatedReg(SplatVal);

  if (auto *C = dyn_cast<ConstantSDNode>(Op1))
    FindReplicatedReg(Op1);

         "Bad type handling");
                     SN->getBasePtr(), SN->getMemOperand());
SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
    SDNode *N, DAGCombinerInfo &DCI) const {

      N->getOperand(0).hasOneUse() &&
      Subtarget.hasVectorEnhancements2()) {
                              Ops, LD->getMemoryVT(), LD->getMemOperand());

    DCI.CombineTo(N, ESLoad);

    DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));
SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())

      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

    EVT VecVT = Op.getValueType();
                     Op.getOperand(0), N->getOperand(1));
    DCI.AddToWorklist(Op.getNode());
    if (EltVT != N->getValueType(0)) {
      DCI.AddToWorklist(Op.getNode());

  if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
    return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
                          IndexN->getZExtValue(), DCI, false);
SDValue SystemZTargetLowering::combineJOIN_DWORDS(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (N->getOperand(0) == N->getOperand(1))

  if (Chain1 == Chain2)
SDValue SystemZTargetLowering::combineFP_ROUND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())

  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;

  if (N->getValueType(0) == MVT::f32 && Op0.hasOneUse() &&
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 1) {
        if (OtherRound.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                        {MVT::v4f32, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VRound.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                           N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineFP_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {

  if (!Subtarget.hasVector())

  unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;

  if (N->getValueType(0) == MVT::f64 && Op0.hasOneUse() &&
    for (auto *U : Vec->uses()) {
      if (U != Op0.getNode() && U->hasOneUse() &&
          U->getOperand(0) == Vec &&
          U->getConstantOperandVal(1) == 2) {
        if (OtherExtend.getOpcode() == N->getOpcode() &&
          if (N->isStrictFPOpcode()) {
                        {MVT::v2f64, MVT::Other}, {Chain, Vec});
          DCI.AddToWorklist(VExtend.getNode());
          DCI.AddToWorklist(Extract1.getNode());
                           N->getVTList(), Extract0, Chain);
SDValue SystemZTargetLowering::combineINT_TO_FP(
    SDNode *N, DAGCombinerInfo &DCI) const {

  unsigned Opcode = N->getOpcode();
  EVT OutVT = N->getValueType(0);

  unsigned InScalarBits = Op->getValueType(0).getScalarSizeInBits();

  if (OutLLVMTy->isVectorTy() && OutScalarBits > InScalarBits &&
      OutScalarBits <= 64) {
    unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
    unsigned ExtOpcode =
SDValue SystemZTargetLowering::combineBSWAP(
    SDNode *N, DAGCombinerInfo &DCI) const {

      N->getOperand(0).hasOneUse() &&
      canLoadStoreByteSwapped(N->getValueType(0))) {
    EVT LoadVT = N->getValueType(0);
    if (LoadVT == MVT::i16)
                              Ops, LD->getMemoryVT(), LD->getMemOperand());
    if (N->getValueType(0) == MVT::i16)

    DCI.CombineTo(N, ResVal);

    DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
      Op.getValueType().isVector() &&
      Op.getOperand(0).getValueType().isVector() &&
      Op.getValueType().getVectorNumElements() ==
          Op.getOperand(0).getValueType().getVectorNumElements())
    Op = Op.getOperand(0);

      (canLoadStoreByteSwapped(N->getValueType(0)) &&
    EVT VecVT = N->getValueType(0);
    EVT EltVT = N->getValueType(0).getVectorElementType();
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());
    DCI.AddToWorklist(Vec.getNode());
    DCI.AddToWorklist(Elt.getNode());

  if (SV && Op.hasOneUse()) {
    EVT VecVT = N->getValueType(0);
    DCI.AddToWorklist(Op0.getNode());
    DCI.AddToWorklist(Op1.getNode());
    DCI.AddToWorklist(Op0.getNode());
    DCI.AddToWorklist(Op1.getNode());
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  // ...
  bool Invert = false;
  // ...
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    // ...
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    // ...
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
      // ...
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
      // ...
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
      // ...
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
    // ...
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
      // ...
    auto *SHL = CompareLHS->getOperand(0).getNode();
    // ...
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    // ...
    auto *IPM = SHL->getOperand(0).getNode();
    // ...
    if (!CompareLHS->hasOneUse())
      // ...
    if (CompareRHS->getZExtValue() != 0)
      // ...
    CCReg = IPM->getOperand(0);
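The SRA/SHL/IPM walk above undoes the standard CC-extraction sequence. A small sketch, assuming the usual SystemZ layout where IPM leaves the condition code in bits 28-29 of its result (the SHL count of 2 is inferred from that layout, and signed right shift is taken to be arithmetic, as on two's-complement targets):

#include <cstdint>

// Shifting the IPM result left by 2 moves CC to bits 30-31; an arithmetic
// shift right by 30 then yields the two CC bits with the upper one
// sign-extended, matching the recognized (SRA (SHL ipm, 2), 30) pattern.
static int32_t signExtendedCC(uint32_t Ipm) {
  return (int32_t)(Ipm << 2) >> 30;
}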
SDValue SystemZTargetLowering::combineBR_CCMASK(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  // ...
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    // ...
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  // ...
                       N->getOperand(3), CCReg);
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  // ...
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
    // ...
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  // ...
                       N->getOperand(0), N->getOperand(1),
                       // ...
SDValue SystemZTargetLowering::combineGET_CCMASK(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  // ...
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
    // ...
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  // ...
  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
    // ...
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();
  // ...
  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
    // ...
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
    SelectCCMaskVal ^= SelectCCValidVal;
  // ...
  if (SelectCCValidVal & ~CCValidVal)
    // ...
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
    // ...
  return Select->getOperand(4);
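The TrueVal/FalseVal handling relies on a small identity: XOR-ing a CC mask with its valid mask selects exactly the complementary set of condition codes. A standalone sketch (the mask values follow the SystemZ convention that CC 0 corresponds to bit 3, an assumption stated here for the example):

#include <cassert>

int main() {
  unsigned CCValid = 0xF; // all four condition codes are meaningful
  unsigned CCMask = 0x8;  // fires for CC 0 (bit 3, by the assumption above)
  // Swapped 0/1 select constants are handled by inverting within CCValid:
  unsigned Inverted = CCMask ^ CCValid;
  assert(Inverted == 0x7);            // fires exactly for CC 1, 2 and 3
  assert((Inverted & ~CCValid) == 0); // still confined to the valid bits
  return 0;
}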
SDValue SystemZTargetLowering::combineIntDIVREM(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  // ...
  EVT VT = N->getValueType(0);
  // ...
SDValue SystemZTargetLowering::combineINTRINSIC(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  // ...
  unsigned Id = N->getConstantOperandVal(1);
  // ...
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
        // ...
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
        // ...

  // ...
  return N->getOperand(0);
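The VLL/VLRL and VSTL/VSTRL combines above work because the length operand is a highest-byte-index: a constant of 15 or more already covers all 16 bytes of a vector register, so a plain full-width access suffices. A minimal sketch of that length semantics, inferred here from the >= 15 checks:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Bytes touched by a VLL-style operation: Len is the index of the highest
// byte accessed, clamped at 15 (one full vector register).
static unsigned bytesTouched(uint64_t Len) {
  return (unsigned)std::min<uint64_t>(Len, 15) + 1;
}

int main() {
  assert(bytesTouched(0) == 1);    // single byte
  assert(bytesTouched(14) == 15);  // partial vector
  assert(bytesTouched(15) == 16);  // whole vector: plain VL/VST suffices
  assert(bytesTouched(100) == 16); // larger constants behave the same
  return 0;
}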
  switch (N->getOpcode()) {
  // ...
  case ISD::UREM:
    return combineIntDIVREM(N, DCI);
  // ...
  EVT VT = Op.getValueType();
  // ...
  unsigned Opcode = Op.getOpcode();
  // ...
  unsigned Id = Op.getConstantOperandVal(0);
  // ...
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    // ...
    SrcDemE = DemandedElts;
    // ...
    SrcDemE = SrcDemE.trunc(NumElts / 2);
    // ...
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    SrcDemE = APInt(NumElts * 2, 0);
    // ...
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    SrcDemE = APInt(NumElts * 2, 0);
    // ...
  case Intrinsic::s390_vpdi: {
    // ...
    SrcDemE = APInt(NumElts, 0);
    if (!DemandedElts[OpNo - 1])
      // ...
    unsigned Mask = Op.getConstantOperandVal(3);
    unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
    // ...
    SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
    // ...
  case Intrinsic::s390_vsldb: {
    // ...
    assert(VT == MVT::v16i8 && "Unexpected type.");
    unsigned FirstIdx = Op.getConstantOperandVal(3);
    assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
    unsigned NumSrc0Els = 16 - FirstIdx;
    SrcDemE = APInt(NumElts, 0);
    // ...
      APInt DemEls = DemandedElts.trunc(NumSrc0Els);
      // ...
      APInt DemEls = DemandedElts.lshr(NumSrc0Els);
      // ...
  case Intrinsic::s390_vperm:
    SrcDemE = APInt(NumElts, -1);
    // ...
    SrcDemE = APInt(1, 1);
    // ...
    SrcDemE = DemandedElts;
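For the VECTOR PACK cases above, the demanded-element bookkeeping is a shift plus a truncate: the low half of the result lanes comes from operand 0, the high half from operand 1. A standalone sketch using llvm::APInt; the OpNo == 2 shift mirrors an elided line and is an assumption here:

#include "llvm/ADT/APInt.h"
using llvm::APInt;

// Result has NumElts lanes; each pack source contributes NumElts / 2 lanes.
// Bit i of a mask stands for lane i.
static APInt packSrcDemanded(const APInt &DemandedElts, unsigned NumElts,
                             unsigned OpNo) {
  APInt SrcDemE = DemandedElts;
  if (OpNo == 2)                      // operand 2 feeds the high result lanes
    SrcDemE.lshrInPlace(NumElts / 2); // assumption: mirrors the elided line
  return SrcDemE.trunc(NumElts / 2);  // keep only this source's lanes
}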
                                   const APInt &DemandedElts,
                                   // ...

                                                      const APInt &DemandedElts,
                                                      unsigned Depth) const {
  // ...
  unsigned tmp0, tmp1;
  // ...
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
    // ...
         "KnownBits does not match VT in bitwidth");
  // ...
         "DemandedElts does not match VT number of elements");
  // ...
  unsigned Opcode = Op.getOpcode();
  // ...
  bool IsLogical = false;
  unsigned Id = Op.getConstantOperandVal(0);
  // ...
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
    // ...
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    // ...
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
    // ...
  if (LHS == 1)
    return 1;
  // ...
  if (RHS == 1)
    return 1;
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  // ...
  if (SrcBitWidth > VTBits) {
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
    // ...
  }
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
                                                      unsigned Depth) const {
  if (Op.getResNo() != 0)
    // ...
  unsigned Opcode = Op.getOpcode();
  // ...
  unsigned Id = Op.getConstantOperandVal(0);
  // ...
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
    // ...
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
    // ...
    EVT VT = Op.getValueType();
  switch (Op->getOpcode()) {
  // ...
         "Unexpected stack alignment");
  // ...
  unsigned StackProbeSize =
      // ...
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
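The probe size is rounded down to the stack alignment with the usual power-of-two mask trick, and a degenerate request falls back to one alignment unit so the probing loop always advances. A minimal sketch:

#include <cassert>

static unsigned roundedProbeSize(unsigned Requested, unsigned StackAlign) {
  // StackAlign must be a power of two for the mask trick to be valid.
  assert((StackAlign & (StackAlign - 1)) == 0 && "Unexpected stack alignment");
  unsigned Size = Requested & ~(StackAlign - 1); // round down to alignment
  return Size ? Size : StackAlign;               // never return a zero step
}

int main() {
  assert(roundedProbeSize(4096, 8) == 4096);
  assert(roundedProbeSize(4100, 8) == 4096);
  assert(roundedProbeSize(3, 8) == 8); // degenerate request falls back
  return 0;
}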
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  // ...
  Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  // ...
    if (Succ->isLiveIn(SystemZ::CC))
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    // ...
  for (auto *MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();
    // ...
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
      // ...
    if (RegRewriteTable.contains(TrueReg))
      TrueReg = RegRewriteTable[TrueReg].first;
    if (RegRewriteTable.contains(FalseReg))
      FalseReg = RegRewriteTable[FalseReg].second;
    // ...
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
        // ...
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  }
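RegRewriteTable exists for chains of selects that consume each other's results: once an earlier select becomes a PHI in the sink block, a later select must read the value that was live on each incoming path instead of the vanished result. A small standalone model of that bookkeeping, with plain integers standing in for virtual registers:

#include <cassert>
#include <map>
#include <utility>

int main() {
  // DestReg -> (value on the true path, value on the false path)
  std::map<int, std::pair<int, int>> RegRewriteTable;

  // First select: %10 = select %1, %2. Its PHI takes (%1, %2).
  RegRewriteTable[10] = {1, 2};

  // A later select reads %10; on each path, substitute that path's value.
  int TrueReg = 10, FalseReg = 3;
  if (RegRewriteTable.count(TrueReg))
    TrueReg = RegRewriteTable[TrueReg].first; // the true path sees %1
  if (RegRewriteTable.count(FalseReg))
    FalseReg = RegRewriteTable[FalseReg].second;
  assert(TrueReg == 1 && FalseReg == 3);
  return 0;
}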
  assert(TFL->hasReservedCallFrame(MF) &&
         "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
  // ...
  uint32_t NumBytes = MI.getOperand(0).getImm();
  // ...
  MI.eraseFromParent();
  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
  // ...
    assert(NextMI.getOperand(3).getImm() == CCValid &&
           "Bad CCValid operands since CC was not redefined.");
    if (NextMI.getOperand(4).getImm() == CCMask ||
        NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
      // ...
    if (NextMI.definesRegister(SystemZ::CC, nullptr) ||
        NextMI.usesCustomInsertionHook())
      // ...
    for (auto *SelMI : Selects)
      if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
        // ...
    if (NextMI.isDebugInstr()) {
      // ...
      assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
      // ...
    } else if (User || ++Count > 20)
      // ...
  bool CCKilled = (LastMI->killsRegister(SystemZ::CC, nullptr) ||
                   // ...
  for (auto *SelMI : Selects)
    SelMI->eraseFromParent();
  // ...
  for (auto *DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);
                                                     unsigned StoreOpcode,
                                                     unsigned STOCOpcode,
                                                     bool Invert) const {
  // ...
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  // ...
  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
  // ...
  for (auto *I : MI.memoperands())
    // ...
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    // ...
    MI.eraseFromParent();
  if (!MI.killsRegister(SystemZ::CC, nullptr) &&
      // ...
  MI.eraseFromParent();
  int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
  // ...
  Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
  // ...
  MI.eraseFromParent();
                                                          bool Invert) const {
  // ...
  int64_t Disp = MI.getOperand(2).getImm();
  // ...
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  // ...
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  // ...
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  // ...
    Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
    // ...
  } else if (BinOpcode)
    // ...
  MI.eraseFromParent();
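emitAtomicLoadBinary expands to the classic load / modify / compare-and-swap retry loop. The same control flow in portable C++, with std::atomic playing the role of the L and CS instructions (a sketch of the loop shape only; the real expansion additionally rotates the subword field into position):

#include <atomic>

// Apply Op atomically and return the previous value, CS-loop style.
template <typename BinOp>
unsigned atomicLoadBinary(std::atomic<unsigned> &Mem, unsigned Operand,
                          BinOp Op) {
  unsigned OldVal = Mem.load(); // initial load (L)
  unsigned NewVal;
  do {
    NewVal = Op(OldVal, Operand); // BinOpcode applied to the old value
    // compare_exchange plays the role of CS: it stores NewVal only if the
    // memory still holds OldVal; otherwise it refreshes OldVal and we retry.
  } while (!Mem.compare_exchange_weak(OldVal, NewVal));
  return OldVal;
}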
                                                        unsigned KeepOldMask) const {
  // ...
  int64_t Disp = MI.getOperand(2).getImm();
  // ...
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  // ...
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  // ...
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  // ...
  MI.eraseFromParent();
  int64_t Disp = MI.getOperand(2).getImm();
  // ...
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  // ...
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");
  // ...
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  // ...
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);
  // ...
  if (!MI.registerDefIsDead(SystemZ::CC, nullptr))
    // ...
  MI.eraseFromParent();
      .add(MI.getOperand(1))
      .addImm(SystemZ::subreg_h64)
      .add(MI.getOperand(2))
      .addImm(SystemZ::subreg_l64);
  MI.eraseFromParent();
                                                  bool ClearEven) const {
  // ...
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  // ...
  Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
  // ...
  MI.eraseFromParent();
                                                         unsigned Opcode,
                                                         bool IsMemset) const {
  // ...
  uint64_t DestDisp = MI.getOperand(1).getImm();
  // ...
    if (!isUInt<12>(Disp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
      // ...
    SrcDisp = MI.getOperand(3).getImm();
  // ...
    SrcDisp = DestDisp++;
    foldDisplIfNeeded(DestBase, DestDisp);
  // ...
  bool IsImmForm = LengthMO.isImm();
  bool IsRegForm = !IsImmForm;
  // ...
                             unsigned Length) -> void {
    // ...
  bool NeedsLoop = false;
  // ...
  Register LenAdjReg = SystemZ::NoRegister;
  // ...
    ImmLength = LengthMO.getImm();
    ImmLength += IsMemset ? 2 : 1;
    if (ImmLength == 0) {
      MI.eraseFromParent();
      // ...
    if (Opcode == SystemZ::CLC) {
      if (ImmLength > 3 * 256)
        // ...
    } else if (ImmLength > 6 * 256)
      // ...
    LenAdjReg = LengthMO.getReg();
  // ...
      (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
       // ...
          MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
      TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
  // ...
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    // ...
  if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
    DestBase = loadZeroAddress();
  if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
    SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
  // ...
      (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));
  // ...
    Register ThisSrcReg = MRI.createVirtualRegister(RC);
    // ...
        (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
    Register NextSrcReg = MRI.createVirtualRegister(RC);
    // ...
        (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);
    // ...
    MBB = MemsetOneCheckMBB;
    // ...
    if (EndMBB && !ImmLength)
      // ...
    if (!HaveSingleBase)
      // ...
    if (Opcode == SystemZ::MVC)
      // ...
    if (!HaveSingleBase)
      // ...
    Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    Register RemDestReg =
        HaveSingleBase ? RemSrcReg
                       : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    // ...
    if (!HaveSingleBase)
      // ...
    if (Opcode != SystemZ::MVC) {
      // ...
  while (ImmLength > 0) {
    // ...
    foldDisplIfNeeded(DestBase, DestDisp);
    foldDisplIfNeeded(SrcBase, SrcDisp);
    insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
    // ...
  if (EndMBB && ImmLength > 0) {
    // ...
  MI.eraseFromParent();
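Once the loop has consumed full blocks, the trailing while above emits one storage-to-storage instruction per remaining chunk, since MVC and its siblings encode at most 256 bytes per operation. A standalone sketch of the chunking, with memcpy standing in for the emitted MVC:

#include <algorithm>
#include <cstdint>
#include <cstring>

// Copy Length bytes in MVC-sized pieces (at most 256 bytes per operation).
static void mvcStyleCopy(char *Dest, const char *Src, uint64_t Length) {
  while (Length > 0) {
    uint64_t ThisLength = std::min<uint64_t>(Length, 256);
    std::memcpy(Dest, Src, ThisLength); // one MVC covers up to 256 bytes
    Dest += ThisLength;                 // advance the displacements...
    Src += ThisLength;
    Length -= ThisLength;               // ...and the remaining length
  }
}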
  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();
  // ...
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  // ...
  MI.eraseFromParent();
                                                       bool NoFloat) const {
  // ...
  MI.setDesc(TII->get(Opcode));
  // ...
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
      0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
      0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  };
  // ...
    Control |= GPRControlBit[15];
  // ...
    Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);
  // ...
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
      // ...
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
      // ...
  MI.eraseFromParent();
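Each bit of the TBEGIN general-register save mask covers an even/odd register pair, which is why adjacent GPRControlBit entries repeat; registers whose bit is clear are treated as clobbered by the transaction, matching the loop above that adds them as implicit defs. A standalone sketch of reading the mask (the table is copied from the code; the interpretation is an assumption based on that loop):

#include <cstdio>

static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100};

int main() {
  unsigned Control = 0x8000 | 0x0100; // pairs r0-r1 and r14-r15 are saved
  for (int I = 0; I < 16; I++)
    if ((Control & GPRControlBit[I]) == 0)
      std::printf("r%d is not preserved across the transaction\n", I);
  return 0;
}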
  Register SizeReg = MI.getOperand(2).getReg();
  // ...
  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  // ...
  MI.eraseFromParent();
SDValue SystemZTargetLowering::
    // ...

  switch (MI.getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    return emitAdjCallStack(MI, MBB);

  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::SCmp128Hi:
    return emitICmp128Hi(MI, MBB, false);
  case SystemZ::UCmp128Hi:
    return emitICmp128Hi(MI, MBB, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0);

  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);

  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);

  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);

  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);

  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);

  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);

  case SystemZ::ATOMIC_LOADW_MIN:
    // ...
  case SystemZ::ATOMIC_LOADW_MAX:
    // ...
  case SystemZ::ATOMIC_LOADW_UMIN:
    // ...
  case SystemZ::ATOMIC_LOADW_UMAX:
    // ...

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCImm:
  case SystemZ::MVCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCImm:
  case SystemZ::XCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCImm:
  case SystemZ::CLCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::MemsetImmImm:
  case SystemZ::MemsetImmReg:
  case SystemZ::MemsetRegImm:
  case SystemZ::MemsetRegReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    // ...

// ...
SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  // ...
      DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
  // ...
  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  EVT OpVT = Op.getValueType();
  // ...
  assert(OpVT.isVector() && "Operand type for VECREDUCE_ADD is not a vector.");
static bool isZeroVector(SDValue N)
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static bool isUndef(ArrayRef< int > Mask)
static bool isSelectPseudo(MachineInstr &MI)
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue lowerAddrSpaceCast(SDValue Op, SelectionDAG &DAG)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
StringRef getValueAsString() const
Return the attribute's value as a string.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setMaxCallFrameSize(uint64_t S)
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setReturnAddressIsTaken(bool s)
MachineFunctionProperties & reset(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getAddrSpaceCast(const SDLoc &dl, EVT VT, SDValue Ptr, unsigned SrcAS, unsigned DestAS)
Return an AddrSpaceCastSDNode.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, const CallInst *CI, std::optional< bool > OverrideTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal icmp immediate, that is, the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
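A hedged sketch of an override: a target whose compare instructions accept 16-bit signed or unsigned immediates could answer the hook as below. The widths are an illustrative assumption, not the actual SystemZ rule, and MyTargetLowering is a hypothetical class.

bool MyTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  // llvm::isInt / llvm::isUInt come from llvm/Support/MathExtras.h.
  return isInt<16>(Imm) || isUInt<16>(Imm);
}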
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
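A hedged sketch of an override for a hypothetical target with no misalignment penalty (an assumption for illustration); the signature matches the hook above, and the Fast out-parameter may be null.

bool MyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, unsigned *Fast) const {
  if (Fast)
    *Fast = 1; // assumption: hardware handles unaligned accesses at full speed
  return true;
}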
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an exception handler.
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into the original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but legal result types.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target has registered with setTargetDAGCombine.
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal add immediate, that is, the target has add instructions which can add a register with the immediate without having to materialize the immediate into a register.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was registered to use 'custom' lowering for that result type.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false, does not have undef bits.
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array, into the specified DAG.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in the Known bitset.
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific-use registers, particular to z/OS in 64-bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligned on entry to a function.
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider type.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do about it.
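A sketch of typical usage inside a TargetLowering subclass constructor; the opcodes and types here are chosen for illustration only, not taken from SystemZ:

setOperationAction(ISD::BSWAP, MVT::i64, Legal);      // native instruction
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);   // no combined div/rem
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom); // handled in LowerOperation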
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that is recognized by PrologEpilogInserter.
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a wider type.
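A sketch, assuming a target whose scalar compares produce 0/1 but whose vector compares produce all-zeros/all-ones lanes (a common combination, used here purely for illustration):

setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);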
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layout.
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what to do about it.
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom DAG combiner for by implementing the PerformDAGCombine virtual method.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate what to do about it.
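A sketch: declaring that a sign-extending load from i1 has no native form, so the legalizer must rewrite it. The particular extension kind and types are illustrative assumptions, not SystemZ's settings.

setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);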
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the data layout.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger size, needs to be expanded to some other code sequence, or the target has a custom expander for it.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the target instruction selector can accept natively.
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
constexpr ScalarTy getFixedValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual results.
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2*N], and return the full value as two results, each of type iN.
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store instruction.
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with side effects that does not return a result.
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b. this is still a strong cmpxchg operation, so Success == (Val == cmp).
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter) to floating point.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low lanes of an integer vector.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to memory under one type and loaded from the same address under the other type.
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined; 0 Round to 0; 1 Round to nearest, ties to even; 2 Round to +inf; 3 Round to -inf; 4 Round to nearest, ties to zero.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of type iN that contains the high half of the i[2*N] product.
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially variable) element number IDX.
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) based on the boolean result of comparing the lhs and rhs (ops #0 and #1) of a conditional expression with the condition code in op #4, a CondCodeSDNode.
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low lanes of an integer vector.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in a large integer register (e.g. sign extending the low 8 bits of a 32-bit register).
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector result.
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0.0.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic function with no side effects.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt).
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the destination VT.
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low lanes of an integer vector.
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero or sign extended from a narrower type.
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target intrinsic function with side effects that returns a result.
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified, possibly variable, elements.
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
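A short usage sketch of the two CondCode helpers above: swapping comparison operands or inverting a branch requires the matching condition-code transform, and the EVT argument selects integer versus floating-point inversion rules.

ISD::CondCode CC = ISD::SETLT;
ISD::CondCode Swapped = ISD::getSetCCSwappedOperands(CC);    // SETGT
ISD::CondCode Inverted = ISD::getSetCCInverse(CC, MVT::i32); // SETGE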
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out, when they are and'ed or or'ed together.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert 'Bytes' to a hex string and output to 'OS'.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
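A self-contained usage sketch for the bit-manipulation helpers listed above (llvm::countr_zero, llvm::countl_zero, llvm::bit_ceil from llvm/ADT/bit.h; llvm::isPowerOf2_32 and llvm::Log2_32_Ceil from llvm/Support/MathExtras.h); the values are arbitrary examples.

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

void bitDemo() {
  assert(llvm::countr_zero(0x8u) == 3);  // 0b1000: three trailing zero bits
  assert(llvm::countl_zero(0x8u) == 28); // 32-bit value: 28 leading zeros
  assert(llvm::isPowerOf2_32(64));       // exactly one bit set
  assert(llvm::bit_ceil(17u) == 32u);    // next power of two up
  assert(llvm::Log2_32_Ceil(17) == 5);   // ceil(log2(17))
}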
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
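A one-line sketch using the template form of SignExtend64, e.g. to decode a signed displacement field; the 12-bit width is an illustrative assumption, not a SystemZ encoding detail.

int64_t Disp = llvm::SignExtend64<12>(0xFFF); // all ones in 12 bits == -1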
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted to an integer type with the same bitwidth.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
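A hedged sketch of combining the KnownBits operations above, roughly as a computeKnownBitsForTargetNode implementation might handle a select-like node: only bits known in both operands survive, then the result is widened. The function and its name are hypothetical.

#include "llvm/Support/KnownBits.h"

llvm::KnownBits selectKnown(const llvm::KnownBits &T, const llvm::KnownBits &F,
                            unsigned ResultBits) {
  llvm::KnownBits K = T.intersectWith(F); // keep only agreed-upon bits
  return K.zext(ResultBits);              // widen as a zero-extension would
                                          // (assumes ResultBits >= K's width)
}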
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})