#include "llvm/IR/IntrinsicsS390.h"
#define DEBUG_TYPE "systemz-lower"
      : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
        Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
  if (Subtarget.hasHighWord())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVector())
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {
  for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
       I <= MVT::LAST_INTEGER_VALUETYPE; ++I) {
  if (Subtarget.hasPopulationCount())
  if (!Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
  if (Subtarget.hasFPExtension())
                     {MVT::i8, MVT::i16, MVT::i32}, Legal);
                     {MVT::i8, MVT::i16}, Legal);
  if (!Subtarget.hasFPExtension()) {
  if (Subtarget.hasMiscellaneousExtensions3()) {
    if (VT != MVT::v2i64)
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements2()) {
  for (unsigned I = MVT::FIRST_FP_VALUETYPE;
       I <= MVT::LAST_FP_VALUETYPE; ++I) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasFPExtension()) {
  if (Subtarget.hasVector()) {
  if (Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1()) {
  for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
                   MVT::v4f32, MVT::v2f64 }) {
  if (!Subtarget.hasVectorEnhancements1()) {
  if (Subtarget.hasVectorEnhancements1())
  if (Subtarget.hasVectorEnhancements1()) {
  if (!Subtarget.hasVector()) {
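  // Table of z/OS runtime library routine names for the common RTLIB calls,
  // expanded from ZOSLibcallNames.def; the loop below registers each entry's
  // name for its RTLIB call.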
  struct RTLibCallMapping {
  static RTLibCallMapping RTLibCallCommon[] = {
#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
#include "ZOSLibcallNames.def"
  for (auto &E : RTLibCallCommon)
  return Subtarget.hasSoftFloat();
  return Subtarget.hasVectorEnhancements1();
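// Vector constants can only be materialized directly when the target has
// vector support; fp128 splats additionally require vector-enhancements-1.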
  if (!Subtarget.hasVector() ||
      (isFP128 && !Subtarget.hasVectorEnhancements1()))
  if (SplatBitSize > 64)
  if (isInt<16>(SignedValue)) {
  if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
  uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
  uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
  uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
  return tryValue(SplatBitsZ | Middle);
    unsigned HalfSize = Width / 2;
    if (HighValue != LowValue || 8 > HalfSize)
    SplatBits = HighValue;
  SplatBitSize = Width;
  BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
  BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
                                       bool ForCodeSize) const {
  if (Imm.isZero() || Imm.isNegZero())
  if (Subtarget.hasInterlockedAccess1() &&
  return isInt<32>(Imm) || isUInt<32>(Imm);
  return isUInt<32>(Imm) || isUInt<32>(-Imm);
        LongDisplacement(LongDispl), IndexReg(IdxReg) {}
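// Decide which addressing form (displacement size, index register) a memory
// access supports: a load whose only user is a comparison against a small
// immediate, or a load feeding a store in the same block, may later be folded
// into a single memory instruction.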
    switch (II->getIntrinsicID()) {
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
  if (isa<LoadInst>(I) && I->hasOneUse()) {
    auto *SingleUser = cast<Instruction>(*I->user_begin());
    if (SingleUser->getParent() == I->getParent()) {
      if (isa<ICmpInst>(SingleUser)) {
        if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
          if (C->getBitWidth() <= 64 &&
              (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
      } else if (isa<StoreInst>(SingleUser))
  } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
    if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
      if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
  if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
    Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
                         I->getOperand(0)->getType());
    bool IsVectorAccess = MemAccessTy->isVectorTy();
    if (!IsVectorAccess && isa<StoreInst>(I)) {
      Value *DataOp = I->getOperand(0);
      if (isa<ExtractElementInst>(DataOp))
        IsVectorAccess = true;
    if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
      User *LoadUser = *I->user_begin();
      if (isa<InsertElementInst>(LoadUser))
        IsVectorAccess = true;
    if (IsFPAccess || IsVectorAccess)
  return AM.Scale == 0;
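// Small memcpy/memset operations (up to MVCFastLen bytes) are left to the
// MVC/XC-based lowering instead of being expanded into individual loads and
// stores.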
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  const int MVCFastLen = 16;
  if (Limit != ~unsigned(0)) {
    if (Op.isMemcpy() && Op.allowOverlap() && Op.size() <= MVCFastLen)
    if (Op.isMemset() && Op.size() - 1 <= MVCFastLen)
    if (Op.isZeroMemset())
                                                  SrcAS, FuncAttributes);
  return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
  if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
  unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
  return FromBits > ToBits;
  return FromBits > ToBits;
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
  } else if (Constraint.size() == 2 && Constraint[0] == 'Z') {
    switch (Constraint[1]) {
                                                  const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  if (!CallOperandVal)
  switch (*constraint) {
    if (Subtarget.hasVector())
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<8>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isUInt<12>(C->getZExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<16>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (isInt<20>(C->getSExtValue()))
    if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
      if (C->getZExtValue() == 0x7fffffff)
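// Helper for explicit "{%rN}"-style register constraints: map the parsed
// register number onto the corresponding physical register of the given class.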
static std::pair<unsigned, const TargetRegisterClass *>
                    const unsigned *Map, unsigned Size) {
  assert(*(Constraint.end()-1) == '}' && "Missing '}'");
  if (isdigit(Constraint[2])) {
      return std::make_pair(Map[Index], RC);
  return std::make_pair(0U, nullptr);
std::pair<unsigned, const TargetRegisterClass *>
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &SystemZ::GR64BitRegClass);
        return std::make_pair(0U, &SystemZ::GR128BitRegClass);
      return std::make_pair(0U, &SystemZ::GR32BitRegClass);
        return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
      else if (VT == MVT::i128)
        return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
      return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
      return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
        return std::make_pair(0U, &SystemZ::FP64BitRegClass);
        return std::make_pair(0U, &SystemZ::FP128BitRegClass);
      return std::make_pair(0U, &SystemZ::FP32BitRegClass);
      if (Subtarget.hasVector()) {
          return std::make_pair(0U, &SystemZ::VR32BitRegClass);
          return std::make_pair(0U, &SystemZ::VR64BitRegClass);
        return std::make_pair(0U, &SystemZ::VR128BitRegClass);
  auto getVTSizeInBits = [&VT]() {
    if (Constraint[1] == 'r') {
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)
    if (Constraint[1] == 'f') {
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 128)
    if (Constraint[1] == 'v') {
      if (!Subtarget.hasVector())
        return std::make_pair(
      if (getVTSizeInBits() == 32)
      if (getVTSizeInBits() == 64)
                                              const Constant *PersonalityFn) const {
                                              const Constant *PersonalityFn) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<8>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isUInt<12>(C->getZExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<16>(C->getSExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (isInt<20>(C->getSExtValue()))
                                              Op.getValueType()));
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0x7fffffff)
                                              Op.getValueType()));
#include "SystemZGenCallingConv.inc"
  static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
                                                    Type *ToType) const {
  if (BitCastToType == MVT::v2i64)
                             MVT::Untyped, Hi, Lo);
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
    MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
  if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
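  // Assign each incoming argument to a register or stack slot per the calling
  // convention, counting the fixed GPR/FPR argument registers that were used
  // so the varargs register save area can be set up afterwards.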
  unsigned NumFixedGPRs = 0;
  unsigned NumFixedFPRs = 0;
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
        RC = &SystemZ::GR32BitRegClass;
        RC = &SystemZ::GR64BitRegClass;
        RC = &SystemZ::FP32BitRegClass;
        RC = &SystemZ::FP64BitRegClass;
        RC = &SystemZ::FP128BitRegClass;
        RC = &SystemZ::VR128BitRegClass;
      ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
      unsigned ArgIndex = Ins[I].OrigArgIndex;
      assert (Ins[I].PartOffset == 0);
      while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
        unsigned PartOffset = Ins[I + 1].PartOffset;
    int64_t VarArgOffset = CCInfo.getStackSize() + Regs->getCallFrameSize();
    int64_t RegSaveOffset =
                                     &SystemZ::FP64BitRegClass);
    MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
    if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
                           unsigned Offset, bool LoadAdr = false) {
  bool LoadAddr = false;
  const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
  unsigned ADADelta = 0;
  unsigned EPADelta = 8;
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    bool IsInternal = (G->getGlobal()->hasInternalLinkage() ||
                       G->getGlobal()->hasPrivateLinkage());
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
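  // Walk the outgoing argument locations: arguments split into several parts
  // are reassembled in a stack spill slot, everything else is either copied
  // into its designated register or stored to the outgoing argument area.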
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    unsigned ArgIndex = Outs[I].OrigArgIndex;
    if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
      Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
        SlotVT = Outs[I].VT;
      int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      assert (Outs[I].PartOffset == 0);
      while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
        SDValue PartValue = OutVals[I + 1];
        unsigned PartOffset = Outs[I + 1].PartOffset;
                SlotVT.getStoreSize()) && "Not enough space for argument part!");
      ArgValue = SpillSlot;
      if (!StackPtr.getNode())
      RegsToPass.push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
  if (!MemOpChains.empty())
                           ->getAddressOfCalleeRegister();
    Callee = DAG.getRegister(CalleeReg, Callee.getValueType());
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
  } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
  } else if (IsTailCall) {
    Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
                             RegsToPass[I].second, Glue);
  for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
                                  RegsToPass[I].second.getValueType()));
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
                                           bool DoesNotReturn, bool IsReturnValueUsed) const {
  Args.reserve(Ops.size());
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Args.push_back(Entry);
  for (auto &Out : Outs)
    if (Out.ArgVT == MVT::i128)
  return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
  if (RetLocs.empty())
  for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
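// Classify intrinsics whose result includes a condition code: report the
// SystemZISD opcode to use and which CC values the operation can produce.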
                                 unsigned &CCValid) {
  unsigned Id = Op.getConstantOperandVal(1);
  case Intrinsic::s390_tbegin:
  case Intrinsic::s390_tbegin_nofloat:
  case Intrinsic::s390_tend:
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vceqbs:
  case Intrinsic::s390_vceqhs:
  case Intrinsic::s390_vceqfs:
  case Intrinsic::s390_vceqgs:
  case Intrinsic::s390_vchbs:
  case Intrinsic::s390_vchhs:
  case Intrinsic::s390_vchfs:
  case Intrinsic::s390_vchgs:
  case Intrinsic::s390_vchlbs:
  case Intrinsic::s390_vchlhs:
  case Intrinsic::s390_vchlfs:
  case Intrinsic::s390_vchlgs:
  case Intrinsic::s390_vtm:
  case Intrinsic::s390_vfaebs:
  case Intrinsic::s390_vfaehs:
  case Intrinsic::s390_vfaefs:
  case Intrinsic::s390_vfaezbs:
  case Intrinsic::s390_vfaezhs:
  case Intrinsic::s390_vfaezfs:
  case Intrinsic::s390_vfeebs:
  case Intrinsic::s390_vfeehs:
  case Intrinsic::s390_vfeefs:
  case Intrinsic::s390_vfeezbs:
  case Intrinsic::s390_vfeezhs:
  case Intrinsic::s390_vfeezfs:
  case Intrinsic::s390_vfenebs:
  case Intrinsic::s390_vfenehs:
  case Intrinsic::s390_vfenefs:
  case Intrinsic::s390_vfenezbs:
  case Intrinsic::s390_vfenezhs:
  case Intrinsic::s390_vfenezfs:
  case Intrinsic::s390_vistrbs:
  case Intrinsic::s390_vistrhs:
  case Intrinsic::s390_vistrfs:
  case Intrinsic::s390_vstrcbs:
  case Intrinsic::s390_vstrchs:
  case Intrinsic::s390_vstrcfs:
  case Intrinsic::s390_vstrczbs:
  case Intrinsic::s390_vstrczhs:
  case Intrinsic::s390_vstrczfs:
  case Intrinsic::s390_vstrsb:
  case Intrinsic::s390_vstrsh:
  case Intrinsic::s390_vstrsf:
  case Intrinsic::s390_vstrszb:
  case Intrinsic::s390_vstrszh:
  case Intrinsic::s390_vstrszf:
  case Intrinsic::s390_vfcedbs:
  case Intrinsic::s390_vfcesbs:
  case Intrinsic::s390_vfchdbs:
  case Intrinsic::s390_vfchsbs:
  case Intrinsic::s390_vfchedbs:
  case Intrinsic::s390_vfchesbs:
  case Intrinsic::s390_vftcidb:
  case Intrinsic::s390_vftcisb:
  case Intrinsic::s390_tdc:
  for (unsigned I = 2; I < NumOps; ++I)
  assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
  return Intr.getNode();
  for (unsigned I = 1; I < NumOps; ++I)
  return Intr.getNode();
  case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
  case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
  int64_t Value = ConstOp1->getSExtValue();
  if (!C.Op0.hasOneUse() ||
  auto *Load = cast<LoadSDNode>(C.Op0);
  unsigned NumBits = Load->getMemoryVT().getSizeInBits();
  if ((NumBits != 8 && NumBits != 16) ||
      NumBits != Load->getMemoryVT().getStoreSizeInBits())
  auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
  if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
  uint64_t Mask = (1 << NumBits) - 1;
  int64_t SignedValue = ConstOp1->getSExtValue();
  } else if (NumBits == 8) {
  if (C.Op0.getValueType() != MVT::i32 ||
      Load->getExtensionType() != ExtType) {
                           Load->getBasePtr(), Load->getPointerInfo(),
                           Load->getMemoryVT(), Load->getAlign(),
                           Load->getMemOperand()->getFlags());
  if (C.Op1.getValueType() != MVT::i32 ||
      Value != ConstOp1->getZExtValue())
  auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
  if (Load->getMemoryVT() == MVT::i8)
  switch (Load->getExtensionType()) {
  if (C.Op0.getValueType() == MVT::i128)
  if (C.Op0.getValueType() == MVT::f128)
  if (isa<ConstantFPSDNode>(C.Op1))
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  if (ConstOp1 && ConstOp1->getZExtValue() == 0)
      isUInt<16>(ConstOp1->getZExtValue()))
      isInt<16>(ConstOp1->getSExtValue()))
  unsigned Opcode0 = C.Op0.getOpcode();
      C.Op0.getConstantOperandVal(1) == 0xffffffff)
        ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
         (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
      Flags.setNoSignedWrap(false);
      Flags.setNoUnsignedWrap(false);
  auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
  if (C1 && C1->isZero()) {
  if (C.Op0.getOpcode() == ISD::SHL && C.Op0.getValueType() == MVT::i64 &&
    auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
    if (C1 && C1->getZExtValue() == 32) {
      SDValue ShlOp0 = C.Op0.getOperand(0);
          cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
      C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
    auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
    if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
        C.Op0.getValueSizeInBits().getFixedValue()) {
      unsigned Type = L->getExtensionType();
        C.Op0 = C.Op0.getOperand(0);
  auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
  uint64_t Amount = Shift->getZExtValue();
  if (Amount >= N.getValueSizeInBits())
                          unsigned ICmpType) {
  assert(Mask != 0 && "ANDs with zero should have been removed by now");
  if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
  if (EffectivelyUnsigned && CmpVal < Low) {
  if (CmpVal == Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
  if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
  if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
  if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
  if (C.Op0.getValueType() == MVT::i128) {
    auto *Mask = dyn_cast<ConstantSDNode>(C.Op1);
    if (Mask && Mask->getAPIntValue() == 0) {
  auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
  uint64_t CmpVal = ConstOp1->getZExtValue();
    NewC.Op0 = C.Op0.getOperand(0);
    NewC.Op1 = C.Op0.getOperand(1);
    Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
    MaskVal = Mask->getZExtValue();
    if (NewC.Op0.getValueType() != MVT::i64 ||
    MaskVal = -(CmpVal & -CmpVal);
  unsigned BitSize = NewC.Op0.getValueSizeInBits();
  unsigned NewCCMask, ShiftVal;
      NewC.Op0.getOpcode() == ISD::SHL &&
      (MaskVal >> ShiftVal != 0) &&
      ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
                                        MaskVal >> ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal >>= ShiftVal;
             NewC.Op0.getOpcode() == ISD::SRL &&
             (MaskVal << ShiftVal != 0) &&
             ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
                                        MaskVal << ShiftVal,
    NewC.Op0 = NewC.Op0.getOperand(0);
    MaskVal <<= ShiftVal;
  if (Mask && Mask->getZExtValue() == MaskVal)
  C.CCMask = NewCCMask;
  if (C.Op0.getValueType() != MVT::i128)
  bool Swap = false, Invert = false;
    C.CCMask ^= C.CCValid;
  auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
  if (!Mask || Mask->getValueSizeInBits(0) > 64)
  if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
  C.Op0 = C.Op0.getOperand(0);
  C.CCValid = CCValid;
    C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
    C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
    C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
    C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
    C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
  C.CCMask &= CCValid;
                         bool IsSignaling = false) {
  unsigned Opcode, CCValid;
  Comparison C(CmpOp0, CmpOp1, Chain);
  if (C.Op0.getValueType().isFloatingPoint()) {
    else if (!IsSignaling)
      C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
  if (!C.Op1.getNode()) {
    switch (C.Op0.getOpcode()) {
    return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
  return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
  Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
  Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
                           unsigned CCValid, unsigned CCMask) {
    case CmpMode::Int:         return 0;
    case CmpMode::FP:          return 0;
    case CmpMode::StrictFP:    return 0;
    case CmpMode::SignalingFP: return 0;
  int Mask[] = { Start, -1, Start + 1, -1 };
      !Subtarget.hasVectorEnhancements1()) {
    SDValue Ops[2] = { Res, NewChain };
    return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
  return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
                                                 bool IsSignaling) const {
  assert (!IsSignaling || Chain);
  CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
                 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
  bool Invert = false;
    assert(IsFP && "Unexpected integer comparison");
                          DL, VT, CmpOp1, CmpOp0, Chain);
                          DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GE.getValue(1));
    assert(IsFP && "Unexpected integer comparison");
                          DL, VT, CmpOp1, CmpOp0, Chain);
                          DL, VT, CmpOp0, CmpOp1, Chain);
                          LT.getValue(1), GT.getValue(1));
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
      Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
    Chain = Cmp.getValue(1);
  if (Chain && Chain.getNode() != Cmp.getNode()) {
  EVT VT = Op.getValueType();
    return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
                                                  bool IsSignaling) const {
  EVT VT = Op.getNode()->getValueType(0);
    SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
                                   Chain, IsSignaling);
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
  Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
      cast<ConstantSDNode>(C.Op1)->getValueSizeInBits(0) <= 64 &&
      C.Op1->getAsZExtVal() == 0) {
3505 SDValue Ops[] = {TrueOp, FalseOp,
3579 Chain = DAG.
getCopyToReg(Chain,
DL, SystemZ::R2D, GOTOffset, Glue);
3586 Node->getValueType(0),
3598 assert(Mask &&
"Missing call preserved mask for calling convention");
3606 Chain = DAG.
getNode(Opcode,
DL, NodeTys, Ops);
3613SDValue SystemZTargetLowering::lowerThreadPointer(
const SDLoc &
DL,
3645 SDValue TP = lowerThreadPointer(
DL, DAG);
3753 if (
CP->isMachineConstantPoolEntry())
3772 unsigned Depth =
Op.getConstantOperandVal(0);
3779 int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
3808 unsigned Depth =
Op.getConstantOperandVal(0);
3816 SDValue FrameAddr = lowerFRAMEADDR(
Op, DAG);
3818 int Offset = TFL->getReturnAddressOffset(MF);
3829 &SystemZ::GR64BitRegClass);
3837 EVT InVT =
In.getValueType();
3838 EVT ResVT =
Op.getValueType();
3843 if (
auto *LoadN = dyn_cast<LoadSDNode>(In))
3846 LoadN->getBasePtr(), LoadN->getMemOperand());
3852 if (InVT == MVT::i32 && ResVT == MVT::f32) {
3854 if (Subtarget.hasHighWord()) {
3858 MVT::i64,
SDValue(U64, 0), In);
3866 DL, MVT::f32, Out64);
3868 if (InVT == MVT::f32 && ResVT == MVT::i32) {
3871 MVT::f64,
SDValue(U64, 0), In);
3873 if (Subtarget.hasHighWord())
3887 return lowerVASTART_XPLINK(
Op, DAG);
3889 return lowerVASTART_ELF(
Op, DAG);
3904 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
3918 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
3922 const unsigned NumFields = 4;
3933 for (
unsigned I = 0;
I < NumFields; ++
I) {
3938 MemOps[
I] = DAG.
getStore(Chain,
DL, Fields[
I], FieldAddr,
3950 const Value *DstSV = cast<SrcValueSDNode>(
Op.getOperand(3))->getValue();
3951 const Value *SrcSV = cast<SrcValueSDNode>(
Op.getOperand(4))->getValue();
3957 Align(8),
false,
false,
3963SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(
SDValue Op,
3966 return lowerDYNAMIC_STACKALLOC_XPLINK(
Op, DAG);
3968 return lowerDYNAMIC_STACKALLOC_ELF(
Op, DAG);
3972SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(
SDValue Op,
3984 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
3987 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3988 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3994 if (ExtraAlignSpace)
3998 bool IsSigned =
false;
3999 bool DoesNotReturn =
false;
4000 bool IsReturnValueUsed =
false;
4001 EVT VT =
Op.getValueType();
4012 Register SPReg = Regs.getStackPointerRegister();
4023 if (ExtraAlignSpace) {
4035SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(
SDValue Op,
4049 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
4052 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
4053 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4064 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4068 if (ExtraAlignSpace)
4076 DAG.
getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
4092 if (RequiredAlign > StackAlign) {
4102 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4109SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
4118 EVT VT =
Op.getValueType();
4125 Op.getOperand(1), Ops[1], Ops[0]);
4126 else if (Subtarget.hasMiscellaneousExtensions2())
4131 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4155 LL, RL, Ops[1], Ops[0]);
4166 EVT VT =
Op.getValueType();
4173 Op.getOperand(1), Ops[1], Ops[0]);
4179 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4187 EVT VT =
Op.getValueType();
4207 EVT VT =
Op.getValueType();
4214 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4219 assert(
Op.getValueType() == MVT::i64 &&
"Should be 64-bit operation");
4222 SDValue Ops[] = {
Op.getOperand(0),
Op.getOperand(1)};
4231 if ((Masks[0] >> 32) == 0xffffffff &&
uint32_t(Masks[1]) == 0xffffffff)
4233 else if ((Masks[1] >> 32) == 0xffffffff &&
uint32_t(Masks[0]) == 0xffffffff)
4249 if (!isInt<16>(
Value))
4270 MVT::i64, HighOp, Low32);
4281 if (
N->getValueType(0) == MVT::i128) {
4282 unsigned BaseOp = 0;
4283 unsigned FlagOp = 0;
4284 bool IsBorrow =
false;
4285 switch (
Op.getOpcode()) {
4308 unsigned BaseOp = 0;
4309 unsigned CCValid = 0;
4310 unsigned CCMask = 0;
4312 switch (
Op.getOpcode()) {
4340 if (
N->getValueType(1) == MVT::i1)
4363 MVT VT =
N->getSimpleValueType(0);
4374 if (VT == MVT::i128) {
4375 unsigned BaseOp = 0;
4376 unsigned FlagOp = 0;
4377 bool IsBorrow =
false;
4378 switch (
Op.getOpcode()) {
4405 unsigned BaseOp = 0;
4406 unsigned CCValid = 0;
4407 unsigned CCMask = 0;
4409 switch (
Op.getOpcode()) {
4438 if (
N->getValueType(1) == MVT::i1)
4446 EVT VT =
Op.getValueType();
4448 Op =
Op.getOperand(0);
4496 if (NumSignificantBits == 0)
4502 BitSize = std::min(BitSize, OrigBitSize);
4511 for (int64_t
I = BitSize / 2;
I >= 8;
I =
I / 2) {
4513 if (BitSize != OrigBitSize)
4550 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4552 (
Node->getMemoryVT() == MVT::i128 ||
Node->getMemoryVT() == MVT::f128) &&
4553 "Only custom lowering i128 or f128.");
4565 EVT PtrVT =
Addr.getValueType();
4566 EVT WideVT = MVT::i32;
4589 unsigned Opcode)
const {
4590 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4593 EVT NarrowVT =
Node->getMemoryVT();
4594 EVT WideVT = MVT::i32;
4595 if (NarrowVT == WideVT)
4607 if (
auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
4612 SDValue AlignedAddr, BitShift, NegBitShift;
4630 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
4649 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4650 EVT MemVT =
Node->getMemoryVT();
4651 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
4653 assert(
Op.getValueType() == MemVT &&
"Mismatched VTs");
4654 assert(Subtarget.hasInterlockedAccess1() &&
4655 "Should have been expanded by AtomicExpand pass.");
4661 Node->getChain(),
Node->getBasePtr(), NegSrc2,
4662 Node->getMemOperand());
4671 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4679 if (
Node->getMemoryVT() == MVT::i128) {
4688 EVT NarrowVT =
Node->getMemoryVT();
4689 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
4690 if (NarrowVT == WideVT) {
4692 SDValue Ops[] = { ChainIn,
Addr, CmpVal, SwapVal };
4694 DL, Tys, Ops, NarrowVT, MMO);
4708 SDValue AlignedAddr, BitShift, NegBitShift;
4713 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
4716 VTList, Ops, NarrowVT, MMO);
4730SystemZTargetLowering::getTargetMMOFlags(
const Instruction &
I)
const {
4735 if (
auto *SI = dyn_cast<StoreInst>(&
I))
4738 if (
auto *LI = dyn_cast<LoadInst>(&
I))
4741 if (
auto *AI = dyn_cast<AtomicRMWInst>(&
I))
4744 if (
auto *AI = dyn_cast<AtomicCmpXchgInst>(&
I))
4756 "in GHC calling convention");
4758 Regs->getStackPointerRegister(),
Op.getValueType());
4769 "in GHC calling convention");
4776 if (StoreBackchain) {
4778 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
4779 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4783 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
4786 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4794 bool IsData =
Op.getConstantOperandVal(4);
4797 return Op.getOperand(0);
4800 bool IsWrite =
Op.getConstantOperandVal(2);
4802 auto *
Node = cast<MemIntrinsicSDNode>(
Op.getNode());
4806 Node->getVTList(), Ops,
4807 Node->getMemoryVT(),
Node->getMemOperand());
4819SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(
SDValue Op,
4821 unsigned Opcode, CCValid;
4823 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
4834SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(
SDValue Op,
4836 unsigned Opcode, CCValid;
4839 if (
Op->getNumValues() == 1)
4841 assert(
Op->getNumValues() == 2 &&
"Expected a CC and non-CC result");
4846 unsigned Id =
Op.getConstantOperandVal(0);
4848 case Intrinsic::thread_pointer:
4849 return lowerThreadPointer(
SDLoc(
Op), DAG);
4851 case Intrinsic::s390_vpdi:
4853 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4855 case Intrinsic::s390_vperm:
4857 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4859 case Intrinsic::s390_vuphb:
4860 case Intrinsic::s390_vuphh:
4861 case Intrinsic::s390_vuphf:
4865 case Intrinsic::s390_vuplhb:
4866 case Intrinsic::s390_vuplhh:
4867 case Intrinsic::s390_vuplhf:
4871 case Intrinsic::s390_vuplb:
4872 case Intrinsic::s390_vuplhw:
4873 case Intrinsic::s390_vuplf:
4877 case Intrinsic::s390_vupllb:
4878 case Intrinsic::s390_vupllh:
4879 case Intrinsic::s390_vupllf:
4883 case Intrinsic::s390_vsumb:
4884 case Intrinsic::s390_vsumh:
4885 case Intrinsic::s390_vsumgh:
4886 case Intrinsic::s390_vsumgf:
4887 case Intrinsic::s390_vsumqf:
4888 case Intrinsic::s390_vsumqg:
4890 Op.getOperand(1),
Op.getOperand(2));
4892 case Intrinsic::s390_vaq:
4894 Op.getOperand(1),
Op.getOperand(2));
4895 case Intrinsic::s390_vaccb:
4896 case Intrinsic::s390_vacch:
4897 case Intrinsic::s390_vaccf:
4898 case Intrinsic::s390_vaccg:
4899 case Intrinsic::s390_vaccq:
4901 Op.getOperand(1),
Op.getOperand(2));
4902 case Intrinsic::s390_vacq:
4904 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4905 case Intrinsic::s390_vacccq:
4907 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4909 case Intrinsic::s390_vsq:
4911 Op.getOperand(1),
Op.getOperand(2));
4912 case Intrinsic::s390_vscbib:
4913 case Intrinsic::s390_vscbih:
4914 case Intrinsic::s390_vscbif:
4915 case Intrinsic::s390_vscbig:
4916 case Intrinsic::s390_vscbiq:
4918 Op.getOperand(1),
Op.getOperand(2));
4919 case Intrinsic::s390_vsbiq:
4921 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4922 case Intrinsic::s390_vsbcbiq:
4924 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4945 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
4948 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
4951 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
4954 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
4957 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
4960 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
4963 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
4966 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
4969 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
4972 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
4975 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
4978 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
4981 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
4995 OpNo0 = OpNo1 = OpNos[1];
4996 }
else if (OpNos[1] < 0) {
4997 OpNo0 = OpNo1 = OpNos[0];
5015 unsigned &OpNo0,
unsigned &OpNo1) {
5016 int OpNos[] = { -1, -1 };
5029 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5031 OpNos[ModelOpNo] = RealOpNo;
5039 unsigned &OpNo0,
unsigned &OpNo1) {
5056 int Elt = Bytes[
From];
5059 Transform[
From] = -1;
5061 while (
P.Bytes[To] != Elt) {
5066 Transform[
From] = To;
5089 if (
auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
5090 Bytes.
resize(NumElements * BytesPerElement, -1);
5091 for (
unsigned I = 0;
I < NumElements; ++
I) {
5092 int Index = VSN->getMaskElt(
I);
5094 for (
unsigned J = 0; J < BytesPerElement; ++J)
5095 Bytes[
I * BytesPerElement + J] =
Index * BytesPerElement + J;
5100 isa<ConstantSDNode>(ShuffleOp.
getOperand(1))) {
5102 Bytes.
resize(NumElements * BytesPerElement, -1);
5103 for (
unsigned I = 0;
I < NumElements; ++
I)
5104 for (
unsigned J = 0; J < BytesPerElement; ++J)
5105 Bytes[
I * BytesPerElement + J] =
Index * BytesPerElement + J;
5116 unsigned BytesPerElement,
int &
Base) {
5118 for (
unsigned I = 0;
I < BytesPerElement; ++
I) {
5119 if (Bytes[Start +
I] >= 0) {
5120 unsigned Elem = Bytes[Start +
I];
5124 if (
unsigned(
Base) % Bytes.
size() + BytesPerElement > Bytes.
size())
5126 }
else if (
unsigned(
Base) != Elem -
I)
5139 unsigned &StartIndex,
unsigned &OpNo0,
5141 int OpNos[] = { -1, -1 };
5143 for (
unsigned I = 0;
I < 16; ++
I) {
5150 Shift = ExpectedShift;
5151 else if (Shift != ExpectedShift)
5155 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5157 OpNos[ModelOpNo] = RealOpNo;
5194 N =
N->getOperand(0);
5196 if (
auto *
Op = dyn_cast<ConstantSDNode>(
N->getOperand(0)))
5197 return Op->getZExtValue() == 0;
5203 for (
unsigned I = 0;
I < Num ;
I++)
5215 for (
unsigned I = 0;
I < 2; ++
I)
5219 unsigned StartIndex, OpNo0, OpNo1;
5228 if (ZeroVecIdx != UINT32_MAX) {
5229 bool MaskFirst =
true;
5234 if (OpNo == ZeroVecIdx &&
I == 0) {
5239 if (OpNo != ZeroVecIdx && Byte == 0) {
5246 if (ZeroIdx != -1) {
5249 if (Bytes[
I] >= 0) {
5252 if (OpNo == ZeroVecIdx)
5262 SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
5280 (!Ops[1].
isUndef() ? Ops[1] : Ops[0]), Op2);
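// A GeneralShuffle describes an arbitrary byte-level shuffle of several vector
// operands: for each result byte, Bytes records the index of the source byte
// within the concatenated operands (-1 for undef). getNode() materializes it,
// typically as a sequence of permute-style operations.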
struct GeneralShuffle {
  GeneralShuffle(EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
  void tryPrepareForUnpack();
  bool unpackWasPrepared() { return UnpackFromEltSize <= 4; }
  unsigned UnpackFromEltSize;
void GeneralShuffle::addUndef() {
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(-1);
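// add() appends the bytes of element Elem of operand Op to the shuffle,
// looking through bitcasts and extracts and registering Op as a new shuffle
// operand if it has not been seen before.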
bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
  EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
  if (FromBytesPerElement < BytesPerElement)
             (FromBytesPerElement - BytesPerElement));
  while (Op.getNode()) {
      Op = Op.getOperand(0);
    } else if (Op.isUndef()) {
  for (; OpNo < Ops.size(); ++OpNo)
    if (Ops[OpNo] == Op)
  if (OpNo == Ops.size())
  for (unsigned I = 0; I < BytesPerElement; ++I)
    Bytes.push_back(Base + I);
  if (Ops.size() == 0)
  tryPrepareForUnpack();
  if (Ops.size() == 1)
    Ops.push_back(DAG.getUNDEF(MVT::v16i8));
  unsigned Stride = 1;
  for (; Stride * 2 < Ops.size(); Stride *= 2) {
    for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
      SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
          else if (OpNo == I + Stride)
        if (NewBytes[J] >= 0) {
                 "Invalid double permute");
          assert(NewBytesMap[J] < 0 && "Invalid double permute");
        if (NewBytes[J] >= 0)
    Ops[1] = Ops[Stride];
  unsigned OpNo0, OpNo1;
  if (unpackWasPrepared() && Ops[1].isUndef())
  else if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
  Op = insertUnpackIfPrepared(DAG, DL, Op);
  dbgs() << Msg.c_str() << " { ";
  for (unsigned i = 0; i < Bytes.size(); i++)
    dbgs() << Bytes[i] << " ";
void GeneralShuffle::tryPrepareForUnpack() {
  if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)
  if (Ops.size() > 2 &&
  UnpackFromEltSize = 1;
  for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
    bool MatchUnpack = true;
      unsigned ToEltSize = UnpackFromEltSize * 2;
      bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
      if (Bytes[Elt] != -1) {
        if (IsZextByte != (OpNo == ZeroVecOpNo)) {
          MatchUnpack = false;
      if (Ops.size() == 2) {
          if (SrcBytes[i] != -1 && SrcBytes[i] % 16 != int(i)) {
            UnpackFromEltSize = UINT_MAX;
  if (UnpackFromEltSize > 4)
5525 LLVM_DEBUG(
dbgs() <<
"Preparing for final unpack of element size "
5526 << UnpackFromEltSize <<
". Zero vector is Op#" << ZeroVecOpNo
5528 dumpBytes(Bytes,
"Original Bytes vector:"););
5533 Elt += UnpackFromEltSize;
5534 for (
unsigned i = 0; i < UnpackFromEltSize; i++, Elt++,
B++)
5535 Bytes[
B] = Bytes[Elt];
5541 Ops.erase(&Ops[ZeroVecOpNo]);
5543 if (Bytes[
I] >= 0) {
5545 if (OpNo > ZeroVecOpNo)
5556 if (!unpackWasPrepared())
5558 unsigned InBits = UnpackFromEltSize * 8;
5562 unsigned OutBits = InBits * 2;
5571 if (!
Op.getOperand(
I).isUndef())
5587 if (
Value.isUndef())
5640 GeneralShuffle GS(VT);
5642 bool FoundOne =
false;
5643 for (
unsigned I = 0;
I < NumElements; ++
I) {
5646 Op =
Op.getOperand(0);
5649 unsigned Elem =
Op.getConstantOperandVal(1);
5650 if (!GS.add(
Op.getOperand(0), Elem))
5653 }
else if (
Op.isUndef()) {
5667 if (!ResidueOps.
empty()) {
5668 while (ResidueOps.
size() < NumElements)
5670 for (
auto &
Op : GS.Ops) {
5671 if (!
Op.getNode()) {
5677 return GS.getNode(DAG,
SDLoc(BVN));
bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
5681 if (
Op.getOpcode() ==
ISD::LOAD && cast<LoadSDNode>(
Op)->isUnindexed())
5683 if (
auto *AL = dyn_cast<AtomicSDNode>(
Op))
5697 unsigned int NumElements = Elems.
size();
5698 unsigned int Count = 0;
5699 for (
auto Elem : Elems) {
5700 if (!Elem.isUndef()) {
5703 else if (Elem != Single) {
5723 if (
Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
5727 bool AllLoads =
true;
5728 for (
auto Elem : Elems)
5729 if (!isVectorElementLoad(Elem)) {
5735 if (VT == MVT::v2i64 && !AllLoads)
5739 if (VT == MVT::v2f64 && !AllLoads)
5749 if (VT == MVT::v4f32 && !AllLoads) {
5763 DL, MVT::v2i64, Op01, Op23);
5771 unsigned NumConstants = 0;
5772 for (
unsigned I = 0;
I < NumElements; ++
I) {
5786 if (NumConstants > 0) {
5787 for (
unsigned I = 0;
I < NumElements; ++
I)
5798 std::map<const SDNode*, unsigned> UseCounts;
5799 SDNode *LoadMaxUses =
nullptr;
5800 for (
unsigned I = 0;
I < NumElements; ++
I)
5801 if (isVectorElementLoad(Elems[
I])) {
5802 SDNode *Ld = Elems[
I].getNode();
5804 if (LoadMaxUses ==
nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
5807 if (LoadMaxUses !=
nullptr) {
5808 ReplicatedVal =
SDValue(LoadMaxUses, 0);
5812 unsigned I1 = NumElements / 2 - 1;
5813 unsigned I2 = NumElements - 1;
5814 bool Def1 = !Elems[
I1].isUndef();
5815 bool Def2 = !Elems[I2].isUndef();
5829 for (
unsigned I = 0;
I < NumElements; ++
I)
5830 if (!
Done[
I] && !Elems[
I].
isUndef() && Elems[
I] != ReplicatedVal)
5838 auto *BVN = cast<BuildVectorSDNode>(
Op.getNode());
5840 EVT VT =
Op.getValueType();
5842 if (BVN->isConstant()) {
5861 for (
unsigned I = 0;
I < NumElements; ++
I)
5862 Ops[
I] =
Op.getOperand(
I);
5863 return buildVector(DAG,
DL, VT, Ops);
5868 auto *VSN = cast<ShuffleVectorSDNode>(
Op.getNode());
5870 EVT VT =
Op.getValueType();
5873 if (VSN->isSplat()) {
5875 unsigned Index = VSN->getSplatIndex();
5877 "Splat index should be defined and in first operand");
5887 GeneralShuffle
GS(VT);
5888 for (
unsigned I = 0;
I < NumElements; ++
I) {
5889 int Elt = VSN->getMaskElt(
I);
5892 else if (!
GS.add(
Op.getOperand(
unsigned(Elt) / NumElements),
5893 unsigned(Elt) % NumElements))
5896 return GS.getNode(DAG,
SDLoc(VSN));
5915 EVT VT =
Op.getValueType();
5920 if (VT == MVT::v2f64 &&
5940SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(
SDValue Op,
5946 EVT VT =
Op.getValueType();
5950 if (
auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
5965SDValue SystemZTargetLowering::
5968 EVT OutVT =
Op.getValueType();
5978 }
while (FromBits != ToBits);
5983SDValue SystemZTargetLowering::
5987 EVT OutVT =
Op.getValueType();
5991 unsigned NumInPerOut = InNumElts / OutNumElts;
5997 unsigned ZeroVecElt = InNumElts;
5998 for (
unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
5999 unsigned MaskElt = PackedElt * NumInPerOut;
6000 unsigned End = MaskElt + NumInPerOut - 1;
6001 for (; MaskElt <
End; MaskElt++)
6002 Mask[MaskElt] = ZeroVecElt++;
6003 Mask[MaskElt] = PackedElt;
6010 unsigned ByScalar)
const {
6015 EVT VT =
Op.getValueType();
6019 if (
auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
6020 APInt SplatBits, SplatUndef;
6021 unsigned SplatBitSize;
6025 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6026 ElemBitSize,
true) &&
6027 SplatBitSize == ElemBitSize) {
6030 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6039 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6045 if (
auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
6046 if (VSN->isSplat()) {
6048 unsigned Index = VSN->getSplatIndex();
6050 "Splat index should be defined and in first operand");
6057 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6069 MVT ResultVT =
Op.getSimpleValueType();
6071 unsigned Check =
Op.getConstantOperandVal(1);
6073 unsigned TDCMask = 0;
6107 int SPFI = cast<FrameIndexSDNode>(
StackPtr.getNode())->getIndex();
6118 return DAG.
getLoad(MVT::i64,
DL, Chain, StackPtr, MPI);
6123 switch (
Op.getOpcode()) {
6125 return lowerFRAMEADDR(
Op, DAG);
6127 return lowerRETURNADDR(
Op, DAG);
6129 return lowerBR_CC(
Op, DAG);
6131 return lowerSELECT_CC(
Op, DAG);
6133 return lowerSETCC(
Op, DAG);
6135 return lowerSTRICT_FSETCC(
Op, DAG,
false);
6137 return lowerSTRICT_FSETCC(
Op, DAG,
true);
6139 return lowerGlobalAddress(cast<GlobalAddressSDNode>(
Op), DAG);
6141 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(
Op), DAG);
6143 return lowerBlockAddress(cast<BlockAddressSDNode>(
Op), DAG);
6145 return lowerJumpTable(cast<JumpTableSDNode>(
Op), DAG);
6147 return lowerConstantPool(cast<ConstantPoolSDNode>(
Op), DAG);
6149 return lowerBITCAST(
Op, DAG);
6151 return lowerVASTART(
Op, DAG);
6153 return lowerVACOPY(
Op, DAG);
6155 return lowerDYNAMIC_STACKALLOC(
Op, DAG);
6157 return lowerGET_DYNAMIC_AREA_OFFSET(
Op, DAG);
6159 return lowerSMUL_LOHI(
Op, DAG);
6161 return lowerUMUL_LOHI(
Op, DAG);
6163 return lowerSDIVREM(
Op, DAG);
6165 return lowerUDIVREM(
Op, DAG);
6170 return lowerXALUO(
Op, DAG);
6173 return lowerUADDSUBO_CARRY(
Op, DAG);
6175 return lowerOR(
Op, DAG);
6177 return lowerCTPOP(
Op, DAG);
6179 return lowerVECREDUCE_ADD(
Op, DAG);
6181 return lowerATOMIC_FENCE(
Op, DAG);
6186 return lowerATOMIC_LDST_I128(
Op, DAG);
6190 return lowerATOMIC_LOAD_SUB(
Op, DAG);
6208 return lowerATOMIC_CMP_SWAP(
Op, DAG);
6210 return lowerSTACKSAVE(
Op, DAG);
6212 return lowerSTACKRESTORE(
Op, DAG);
6214 return lowerPREFETCH(
Op, DAG);
6216 return lowerINTRINSIC_W_CHAIN(
Op, DAG);
6218 return lowerINTRINSIC_WO_CHAIN(
Op, DAG);
6220 return lowerBUILD_VECTOR(
Op, DAG);
6222 return lowerVECTOR_SHUFFLE(
Op, DAG);
6224 return lowerSCALAR_TO_VECTOR(
Op, DAG);
6226 return lowerINSERT_VECTOR_ELT(
Op, DAG);
6228 return lowerEXTRACT_VECTOR_ELT(
Op, DAG);
6230 return lowerSIGN_EXTEND_VECTOR_INREG(
Op, DAG);
6232 return lowerZERO_EXTEND_VECTOR_INREG(
Op, DAG);
6242 return lowerIS_FPCLASS(
Op, DAG);
6244 return lowerGET_ROUNDING(
Op, DAG);
6246 return lowerREADCYCLECOUNTER(
Op, DAG);
6260 &SystemZ::FP128BitRegClass);
6269 SystemZ::REG_SEQUENCE, SL, MVT::f128,
6284 &SystemZ::FP128BitRegClass);
6302 switch (
N->getOpcode()) {
6306 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1) };
6309 DL, Tys, Ops, MVT::i128, MMO);
6312 if (
N->getValueType(0) == MVT::f128)
6326 SDValue Ops[] = {
N->getOperand(0), Val,
N->getOperand(2)};
6329 DL, Tys, Ops, MVT::i128, MMO);
6332 if (cast<AtomicSDNode>(
N)->getSuccessOrdering() ==
6335 MVT::Other, Res), 0);
6342 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1),
6347 DL, Tys, Ops, MVT::i128, MMO);
6358 if (
N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
6378#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
6489 OPCODE(ATOMIC_LOADW_ADD);
6490 OPCODE(ATOMIC_LOADW_SUB);
6491 OPCODE(ATOMIC_LOADW_AND);
6493 OPCODE(ATOMIC_LOADW_XOR);
6494 OPCODE(ATOMIC_LOADW_NAND);
6495 OPCODE(ATOMIC_LOADW_MIN);
6496 OPCODE(ATOMIC_LOADW_MAX);
6497 OPCODE(ATOMIC_LOADW_UMIN);
6498 OPCODE(ATOMIC_LOADW_UMAX);
6499 OPCODE(ATOMIC_CMP_SWAPW);
6502 OPCODE(ATOMIC_STORE_128);
6503 OPCODE(ATOMIC_CMP_SWAP_128);
6518bool SystemZTargetLowering::canTreatAsByteVector(
EVT VT)
const {
6519 if (!Subtarget.hasVector())
6533 DAGCombinerInfo &DCI,
6541 unsigned Opcode =
Op.getOpcode();
6544 Op =
Op.getOperand(0);
6546 canTreatAsByteVector(
Op.getValueType())) {
6555 BytesPerElement,
First))
6562 if (Byte % BytesPerElement != 0)
6565 Index = Byte / BytesPerElement;
6569 canTreatAsByteVector(
Op.getValueType())) {
6572 EVT OpVT =
Op.getValueType();
6574 if (OpBytesPerElement < BytesPerElement)
6578 unsigned End = (
Index + 1) * BytesPerElement;
6579 if (
End % OpBytesPerElement != 0)
6582 Op =
Op.getOperand(
End / OpBytesPerElement - 1);
6583 if (!
Op.getValueType().isInteger()) {
6586 DCI.AddToWorklist(
Op.getNode());
6591 DCI.AddToWorklist(
Op.getNode());
6598 canTreatAsByteVector(
Op.getValueType()) &&
6599 canTreatAsByteVector(
Op.getOperand(0).getValueType())) {
6601 EVT ExtVT =
Op.getValueType();
6602 EVT OpVT =
Op.getOperand(0).getValueType();
6605 unsigned Byte =
Index * BytesPerElement;
6606 unsigned SubByte =
Byte % ExtBytesPerElement;
6607 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
6608 if (SubByte < MinSubByte ||
6609 SubByte + BytesPerElement > ExtBytesPerElement)
6612 Byte =
Byte / ExtBytesPerElement * OpBytesPerElement;
6614 Byte += SubByte - MinSubByte;
6615 if (Byte % BytesPerElement != 0)
6617 Op =
Op.getOperand(0);
6624 if (
Op.getValueType() != VecVT) {
6626 DCI.AddToWorklist(
Op.getNode());
6636SDValue SystemZTargetLowering::combineTruncateExtract(
6645 if (canTreatAsByteVector(VecVT)) {
6646 if (
auto *IndexN = dyn_cast<ConstantSDNode>(
Op.getOperand(1))) {
6649 if (BytesPerElement % TruncBytes == 0) {
6655 unsigned Scale = BytesPerElement / TruncBytes;
6656 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
6662 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
6663 return combineExtract(
DL, ResVT, VecVT, Vec, NewIndex, DCI,
true);
SDValue SystemZTargetLowering::combineZERO_EXTEND(
    SDNode *N, DAGCombinerInfo &DCI) const {
6676 EVT VT =
N->getValueType(0);
6678 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.
getOperand(0));
6679 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
6680 if (TrueOp && FalseOp) {
6690 DCI.CombineTo(N0.
getNode(), TruncSelect);
6720SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
6721 SDNode *
N, DAGCombinerInfo &DCI)
const {
6727 EVT VT =
N->getValueType(0);
6728 EVT EVT = cast<VTSDNode>(
N->getOperand(1))->getVT();
6741SDValue SystemZTargetLowering::combineSIGN_EXTEND(
6742 SDNode *
N, DAGCombinerInfo &DCI)
const {
6748 EVT VT =
N->getValueType(0);
6750 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
6753 if (
auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.
getOperand(1))) {
6755 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
6756 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
6772SDValue SystemZTargetLowering::combineMERGE(
6773 SDNode *
N, DAGCombinerInfo &DCI)
const {
6775 unsigned Opcode =
N->getOpcode();
6783 if (Op1 ==
N->getOperand(0))
6788 if (ElemBytes <= 4) {
6796 DCI.AddToWorklist(Op1.
getNode());
6799 DCI.AddToWorklist(
Op.getNode());
6808 LoPart = HiPart =
nullptr;
6812 UI != UIEnd; ++UI) {
6814 if (UI.getUse().getResNo() != 0)
6819 bool IsLoPart =
true;
6844 LoPart = HiPart =
nullptr;
6848 UI != UIEnd; ++UI) {
6850 if (UI.getUse().getResNo() != 0)
6856 User->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
6859 switch (
User->getConstantOperandVal(1)) {
6860 case SystemZ::subreg_l64:
6865 case SystemZ::subreg_h64:
6877SDValue SystemZTargetLowering::combineLOAD(
6878 SDNode *
N, DAGCombinerInfo &DCI)
const {
6880 EVT LdVT =
N->getValueType(0);
6895 LD->getPointerInfo(),
LD->getOriginalAlign(),
6896 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
6898 DCI.CombineTo(HiPart, EltLoad,
true);
6905 LD->getPointerInfo().getWithOffset(8),
LD->getOriginalAlign(),
6906 LD->getMemOperand()->getFlags(),
LD->getAAInfo());
6908 DCI.CombineTo(LoPart, EltLoad,
true);
6915 DCI.AddToWorklist(Chain.
getNode());
6937 else if (UI.getUse().getResNo() == 0)
6940 if (!Replicate || OtherUses.
empty())
6946 for (
SDNode *U : OtherUses) {
6955bool SystemZTargetLowering::canLoadStoreByteSwapped(
EVT VT)
const {
6956 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
6958 if (Subtarget.hasVectorEnhancements2())
6959 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)
6971 for (
unsigned i = 0; i < NumElts; ++i) {
6972 if (M[i] < 0)
continue;
6973 if ((
unsigned) M[i] != NumElts - 1 - i)
6981 for (
auto *U : StoredVal->
uses()) {
6983 EVT CurrMemVT = ST->getMemoryVT().getScalarType();
6986 }
else if (isa<BuildVectorSDNode>(U)) {
SDValue SystemZTargetLowering::combineSTORE(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *SN = cast<StoreSDNode>(N);
7046 auto &Op1 =
N->getOperand(1);
7047 EVT MemVT = SN->getMemoryVT();
7052 if (MemVT.
isInteger() && SN->isTruncatingStore()) {
7054 combineTruncateExtract(
SDLoc(
N), MemVT, SN->getValue(), DCI)) {
7055 DCI.AddToWorklist(
Value.getNode());
7059 SN->getBasePtr(), SN->getMemoryVT(),
7060 SN->getMemOperand());
7064 if (!SN->isTruncatingStore() &&
7075 N->getOperand(0), BSwapOp,
N->getOperand(2)
7080 Ops, MemVT, SN->getMemOperand());
7083 if (!SN->isTruncatingStore() &&
7086 Subtarget.hasVectorEnhancements2()) {
7096 Ops, MemVT, SN->getMemOperand());
7101 if (!SN->isTruncatingStore() &&
7104 N->getOperand(0).reachesChainWithoutSideEffects(
SDValue(Op1.
getNode(), 1))) {
7108 Ops, MemVT, SN->getMemOperand());
7118 DAG.
getStore(SN->getChain(),
DL, HiPart, SN->getBasePtr(),
7119 SN->getPointerInfo(), SN->getOriginalAlign(),
7120 SN->getMemOperand()->getFlags(), SN->getAAInfo());
7125 SN->getPointerInfo().getWithOffset(8),
7126 SN->getOriginalAlign(),
7127 SN->getMemOperand()->
getFlags(), SN->getAAInfo());
7147 if (
C->getAPIntValue().getBitWidth() > 64 ||
C->isAllOnes() ||
7151 if (VCI.isVectorConstantLegal(Subtarget) &&
7160 auto FindReplicatedReg = [&](
SDValue MulOp) {
7161 EVT MulVT = MulOp.getValueType();
7162 if (MulOp->getOpcode() ==
ISD::MUL &&
7163 (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
7167 WordVT =
LHS->getOperand(0).getValueType();
7169 WordVT = cast<VTSDNode>(
LHS->getOperand(1))->getVT();
7173 if (
auto *
C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
7175 APInt(MulVT.getSizeInBits(),
C->getZExtValue()));
7176 if (VCI.isVectorConstantLegal(Subtarget) &&
7178 WordVT == VCI.VecVT.getScalarType())
7184 if (isa<BuildVectorSDNode>(Op1) &&
7187 if (
auto *
C = dyn_cast<ConstantSDNode>(SplatVal))
7190 FindReplicatedReg(SplatVal);
7192 if (
auto *
C = dyn_cast<ConstantSDNode>(Op1))
7195 FindReplicatedReg(Op1);
7200 "Bad type handling");
7205 SN->getBasePtr(), SN->getMemOperand());
7212SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
7213 SDNode *
N, DAGCombinerInfo &DCI)
const {
7217 N->getOperand(0).hasOneUse() &&
7218 Subtarget.hasVectorEnhancements2()) {
7233 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
7237 DCI.CombineTo(
N, ESLoad);
7241 DCI.CombineTo(
Load.getNode(), ESLoad, ESLoad.
getValue(1));
7251SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
7252 SDNode *
N, DAGCombinerInfo &DCI)
const {
7255 if (!Subtarget.hasVector())
7261 Op.getValueType().isVector() &&
7262 Op.getOperand(0).getValueType().isVector() &&
7263 Op.getValueType().getVectorNumElements() ==
7264 Op.getOperand(0).getValueType().getVectorNumElements())
7265 Op =
Op.getOperand(0);
7269 EVT VecVT =
Op.getValueType();
7272 Op.getOperand(0),
N->getOperand(1));
7273 DCI.AddToWorklist(
Op.getNode());
7275 if (EltVT !=
N->getValueType(0)) {
7276 DCI.AddToWorklist(
Op.getNode());
7283 if (
auto *IndexN = dyn_cast<ConstantSDNode>(
N->getOperand(1))) {
7286 return combineExtract(
SDLoc(
N),
N->getValueType(0), VecVT, Op0,
7287 IndexN->getZExtValue(), DCI,
false);
7292SDValue SystemZTargetLowering::combineJOIN_DWORDS(
7293 SDNode *
N, DAGCombinerInfo &DCI)
const {
7296 if (
N->getOperand(0) ==
N->getOperand(1))
7307 if (Chain1 == Chain2)
7315SDValue SystemZTargetLowering::combineFP_ROUND(
7316 SDNode *
N, DAGCombinerInfo &DCI)
const {
7318 if (!Subtarget.hasVector())
7327 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7330 if (
N->getValueType(0) == MVT::f32 && Op0.
hasOneUse() &&
7336 for (
auto *U : Vec->
uses()) {
7337 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7339 U->getOperand(0) == Vec &&
7341 U->getConstantOperandVal(1) == 1) {
7343 if (OtherRound.
getOpcode() ==
N->getOpcode() &&
7347 if (
N->isStrictFPOpcode()) {
7352 {MVT::v4f32, MVT::Other}, {Chain, Vec});
7357 DCI.AddToWorklist(VRound.
getNode());
7361 DCI.AddToWorklist(Extract1.
getNode());
7370 N->getVTList(), Extract0, Chain);
7379SDValue SystemZTargetLowering::combineFP_EXTEND(
7380 SDNode *
N, DAGCombinerInfo &DCI)
const {
7382 if (!Subtarget.hasVector())
7391 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7394 if (
N->getValueType(0) == MVT::f64 && Op0.
hasOneUse() &&
7400 for (
auto *U : Vec->
uses()) {
7401 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7403 U->getOperand(0) == Vec &&
7405 U->getConstantOperandVal(1) == 2) {
7407 if (OtherExtend.
getOpcode() ==
N->getOpcode() &&
7411 if (
N->isStrictFPOpcode()) {
7416 {MVT::v2f64, MVT::Other}, {Chain, Vec});
7421 DCI.AddToWorklist(VExtend.
getNode());
7425 DCI.AddToWorklist(Extract1.
getNode());
7434 N->getVTList(), Extract0, Chain);
7443SDValue SystemZTargetLowering::combineINT_TO_FP(
7444 SDNode *
N, DAGCombinerInfo &DCI)
const {
7449 unsigned Opcode =
N->getOpcode();
7450 EVT OutVT =
N->getValueType(0);
7454 unsigned InScalarBits =
Op->getValueType(0).getScalarSizeInBits();
7460 if (OutLLVMTy->
isVectorTy() && OutScalarBits > InScalarBits &&
7461 OutScalarBits <= 64) {
7462 unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
7465 unsigned ExtOpcode =
7473SDValue SystemZTargetLowering::combineBSWAP(
7474 SDNode *
N, DAGCombinerInfo &DCI)
const {
7478 N->getOperand(0).hasOneUse() &&
7479 canLoadStoreByteSwapped(
N->getValueType(0))) {
7488 EVT LoadVT =
N->getValueType(0);
7489 if (LoadVT == MVT::i16)
7494 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
7498 if (
N->getValueType(0) == MVT::i16)
7503 DCI.CombineTo(
N, ResVal);
7507 DCI.CombineTo(
Load.getNode(), ResVal, BSLoad.
getValue(1));
7516 Op.getValueType().isVector() &&
7517 Op.getOperand(0).getValueType().isVector() &&
7518 Op.getValueType().getVectorNumElements() ==
7519 Op.getOperand(0).getValueType().getVectorNumElements())
7520 Op =
Op.getOperand(0);
7532 (canLoadStoreByteSwapped(
N->getValueType(0)) &&
7534 EVT VecVT =
N->getValueType(0);
7535 EVT EltVT =
N->getValueType(0).getVectorElementType();
7538 DCI.AddToWorklist(Vec.
getNode());
7542 DCI.AddToWorklist(Elt.
getNode());
7545 DCI.AddToWorklist(Vec.
getNode());
7547 DCI.AddToWorklist(Elt.
getNode());
7555 if (SV &&
Op.hasOneUse()) {
7563 EVT VecVT =
N->getValueType(0);
7566 DCI.AddToWorklist(Op0.
getNode());
7570 DCI.AddToWorklist(Op1.
getNode());
7573 DCI.AddToWorklist(Op0.
getNode());
7575 DCI.AddToWorklist(Op1.
getNode());
  auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
  bool Invert = false;
    auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
    auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
    else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
    auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
    auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
    if (!NewCCValid || !NewCCMask)
    CCValid = NewCCValid->getZExtValue();
    CCMask = NewCCMask->getZExtValue();
  if (CompareLHS->getOpcode() == ISD::SRA) {
    auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
    if (!SRACount || SRACount->getZExtValue() != 30)
    auto *SHL = CompareLHS->getOperand(0).getNode();
    auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
    auto *IPM = SHL->getOperand(0).getNode();
    if (!CompareLHS->hasOneUse())
    if (CompareRHS->getZExtValue() != 0)
    CCReg = IPM->getOperand(0);
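// Editor's sketch, not part of the original file: the arithmetic behind the
// pattern matched above. IPM leaves the condition code in bits 29:28 of its
// 32-bit result, so CC typically reaches an integer compare either as a plain
// logical shift right by 28, or as the SHL-by-2 / SRA-by-30 form recognized by
// the SRA case; the combine rewrites such compares to use CC directly.
#include <cstdint>
static unsigned ccFromIPM(uint32_t Ipm) {
  return (Ipm >> 28) & 3;                       // CC in the range 0..3
}
static int ccFromIPMSignExtended(uint32_t Ipm) {
  return (int32_t)(Ipm << 2) >> 30;             // CC 0,1,2,3 -> 0,1,-2,-1
}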
SDValue SystemZTargetLowering::combineBR_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
                     N->getOperand(3), CCReg);
SDValue SystemZTargetLowering::combineSELECT_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
                     N->getOperand(0), N->getOperand(1),
SDValue SystemZTargetLowering::combineGET_CCMASK(
    SDNode *N, DAGCombinerInfo &DCI) const {
  auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
  auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
  if (!CCValid || !CCMask)
  int CCValidVal = CCValid->getZExtValue();
  int CCMaskVal = CCMask->getZExtValue();
  auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
  auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
  if (!SelectCCValid || !SelectCCMask)
  int SelectCCValidVal = SelectCCValid->getZExtValue();
  int SelectCCMaskVal = SelectCCMask->getZExtValue();
  auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
  auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
  if (!TrueVal || !FalseVal)
  else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() == 1)
    SelectCCMaskVal ^= SelectCCValidVal;
  if (SelectCCValidVal & ~CCValidVal)
  if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
  return Select->getOperand(4);
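// Editor's sketch, not part of the original file: a SELECT_CCMASK whose results
// are swapped (TrueVal == 0, FalseVal == 1) computes the opposite condition, so
// its mask is inverted by XOR-ing with the valid bits, exactly as done above.
// Example with SystemZ's 4-bit CC masks (bit 8 = CC 0): Valid = 14, Mask = 8
// ("equal") inverts to 6 ("not equal" among the conditions a compare can set).
static int invertCCMask(int CCValid, int CCMask) {
  return CCMask ^ CCValid; // flip only the condition-code values that can occur
}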
SDValue SystemZTargetLowering::combineIntDIVREM(
    SDNode *N, DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

SDValue SystemZTargetLowering::combineINTRINSIC(
    SDNode *N, DAGCombinerInfo &DCI) const {
  unsigned Id = N->getConstantOperandVal(1);
  case Intrinsic::s390_vll:
  case Intrinsic::s390_vlrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2)))
      if (C->getZExtValue() >= 15)
  case Intrinsic::s390_vstl:
  case Intrinsic::s390_vstrl:
    if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(3)))
      if (C->getZExtValue() >= 15)
  return N->getOperand(0);

  switch (N->getOpcode()) {
  case ISD::UREM:
    return combineIntDIVREM(N, DCI);
  EVT VT = Op.getValueType();
  unsigned Opcode = Op.getOpcode();
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
    SrcDemE = DemandedElts;
    SrcDemE = SrcDemE.trunc(NumElts / 2);
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
    SrcDemE = APInt(NumElts * 2, 0);
  case Intrinsic::s390_vpdi: {
    SrcDemE = APInt(NumElts, 0);
    if (!DemandedElts[OpNo - 1])
    unsigned Mask = Op.getConstantOperandVal(3);
    unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
    SrcDemE.setBit((Mask & MaskBit) ? 1 : 0);
  case Intrinsic::s390_vsldb: {
    assert(VT == MVT::v16i8 && "Unexpected type.");
    unsigned FirstIdx = Op.getConstantOperandVal(3);
    assert(FirstIdx > 0 && FirstIdx < 16 && "Unused operand.");
    unsigned NumSrc0Els = 16 - FirstIdx;
    SrcDemE = APInt(NumElts, 0);
    APInt DemEls = DemandedElts.trunc(NumSrc0Els);
    APInt DemEls = DemandedElts.lshr(NumSrc0Els);
  case Intrinsic::s390_vperm:
    SrcDemE = APInt(NumElts, -1);
    SrcDemE = APInt(1, 1);
    SrcDemE = DemandedElts;
                                  const APInt &DemandedElts,
                                  const APInt &DemandedElts,
                                  unsigned Depth) const {
  unsigned tmp0, tmp1;
  EVT VT = Op.getValueType();
  if (Op.getResNo() != 0 || VT == MVT::Untyped)
         "KnownBits does not match VT in bitwidth");
         "DemandedElts does not match VT number of elements");
  unsigned Opcode = Op.getOpcode();
  bool IsLogical = false;
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
  case Intrinsic::s390_vuplhb:
  case Intrinsic::s390_vuplhh:
  case Intrinsic::s390_vuplhf:
  case Intrinsic::s390_vupllb:
  case Intrinsic::s390_vupllh:
  case Intrinsic::s390_vupllf:
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
  if (LHS == 1)
    return 1;
  if (RHS == 1)
    return 1;
  unsigned Common = std::min(LHS, RHS);
  unsigned SrcBitWidth = Op.getOperand(OpNo).getScalarValueSizeInBits();
  EVT VT = Op.getValueType();
  if (SrcBitWidth > VTBits) {
    unsigned SrcExtraBits = SrcBitWidth - VTBits;
    if (Common > SrcExtraBits)
      return (Common - SrcExtraBits);
  assert(SrcBitWidth == VTBits && "Expected operands of same bitwidth.");
                                  unsigned Depth) const {
  if (Op.getResNo() != 0)
  unsigned Opcode = Op.getOpcode();
  unsigned Id = Op.getConstantOperandVal(0);
  case Intrinsic::s390_vpksh:
  case Intrinsic::s390_vpksf:
  case Intrinsic::s390_vpksg:
  case Intrinsic::s390_vpkshs:
  case Intrinsic::s390_vpksfs:
  case Intrinsic::s390_vpksgs:
  case Intrinsic::s390_vpklsh:
  case Intrinsic::s390_vpklsf:
  case Intrinsic::s390_vpklsg:
  case Intrinsic::s390_vpklshs:
  case Intrinsic::s390_vpklsfs:
  case Intrinsic::s390_vpklsgs:
  case Intrinsic::s390_vpdi:
  case Intrinsic::s390_vsldb:
  case Intrinsic::s390_vperm:
  case Intrinsic::s390_vuphb:
  case Intrinsic::s390_vuphh:
  case Intrinsic::s390_vuphf:
  case Intrinsic::s390_vuplb:
  case Intrinsic::s390_vuplhw:
  case Intrinsic::s390_vuplf: {
  EVT VT = Op.getValueType();

  switch (Op->getOpcode()) {
         "Unexpected stack alignment");
  unsigned StackProbeSize =
  StackProbeSize &= ~(StackAlign - 1);
  return StackProbeSize ? StackProbeSize : StackAlign;
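// Editor's sketch, not part of the original file: the rounding above clears the
// low bits of the requested probe size so it becomes a multiple of the stack
// alignment (assumed to be a power of two), falling back to the alignment
// itself if that would leave zero.
static unsigned roundProbeSize(unsigned StackProbeSize, unsigned StackAlign) {
  StackProbeSize &= ~(StackAlign - 1);   // e.g. 4095 with 8-byte alignment -> 4088
  return StackProbeSize ? StackProbeSize : StackAlign;
}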
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    if (Succ->isLiveIn(SystemZ::CC))
  switch (MI.getOpcode()) {
  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
  for (auto *MI : Selects) {
    Register DestReg = MI->getOperand(0).getReg();
    Register TrueReg = MI->getOperand(1).getReg();
    Register FalseReg = MI->getOperand(2).getReg();
    if (MI->getOperand(4).getImm() == (CCValid ^ CCMask))
    if (RegRewriteTable.contains(TrueReg))
      TrueReg = RegRewriteTable[TrueReg].first;
    if (RegRewriteTable.contains(FalseReg))
      FalseReg = RegRewriteTable[FalseReg].second;
    BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(SystemZ::PHI), DestReg)
    RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
  assert(TFL->hasReservedCallFrame(MF) &&
         "ADJSTACKDOWN and ADJSTACKUP should be no-ops");
  uint32_t NumBytes = MI.getOperand(0).getImm();
  MI.eraseFromParent();
  unsigned CCValid = MI.getOperand(3).getImm();
  unsigned CCMask = MI.getOperand(4).getImm();
    assert(NextMI.getOperand(3).getImm() == CCValid &&
           "Bad CCValid operands since CC was not redefined.");
    if (NextMI.getOperand(4).getImm() == CCMask ||
        NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
    if (NextMI.definesRegister(SystemZ::CC, nullptr) ||
        NextMI.usesCustomInsertionHook())
    for (auto *SelMI : Selects)
      if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
    if (NextMI.isDebugInstr()) {
      assert(NextMI.isDebugValue() && "Unhandled debug opcode.");
    } else if (User || ++Count > 20)
  bool CCKilled = (LastMI->killsRegister(SystemZ::CC, nullptr) ||
  for (auto *SelMI : Selects)
    SelMI->eraseFromParent();
  for (auto *DbgMI : DbgValues)
    MBB->splice(InsertPos, StartMBB, DbgMI);
                                                   unsigned StoreOpcode,
                                                   unsigned STOCOpcode,
                                                   bool Invert) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register IndexReg = MI.getOperand(3).getReg();
  unsigned CCValid = MI.getOperand(4).getImm();
  unsigned CCMask = MI.getOperand(5).getImm();
  StoreOpcode = TII->getOpcodeForOffset(StoreOpcode, Disp);
  for (auto *I : MI.memoperands())
  if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
    MI.eraseFromParent();
  if (!MI.killsRegister(SystemZ::CC, nullptr) &&
  MI.eraseFromParent();
  int HiOpcode = Unsigned ? SystemZ::VECLG : SystemZ::VECG;
  Register Temp = MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
  MI.eraseFromParent();
                                                        bool Invert) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
    Register Tmp = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  } else if (BinOpcode)
  MI.eraseFromParent();
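// Editor's sketch, not part of the original file: the general shape of the
// compare-and-swap retry loop that the expansion above builds for a sub-word
// atomic read-modify-write. The rotate/mask details used to splice the field
// into the containing 32-bit word are simplified here to a mask.
#include <atomic>
#include <cstdint>
static uint32_t atomicSubwordRMW(std::atomic<uint32_t> &AlignedWord,
                                 uint32_t FieldMask, uint32_t (*Op)(uint32_t)) {
  uint32_t Old = AlignedWord.load();
  uint32_t New;
  do {
    // Apply the operation to the selected field, leaving the other bytes intact.
    New = (Old & ~FieldMask) | (Op(Old) & FieldMask);
  } while (!AlignedWord.compare_exchange_weak(Old, New)); // CS-style retry
  return Old & FieldMask; // original field value
}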
                                                        unsigned KeepOldMask) const {
  int64_t Disp = MI.getOperand(2).getImm();
  Register BitShift = MI.getOperand(4).getReg();
  Register NegBitShift = MI.getOperand(5).getReg();
  unsigned BitSize = MI.getOperand(6).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register OldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register NewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedOldVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedAltVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  Register RotatedNewVal = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
  MI.eraseFromParent();

  int64_t Disp = MI.getOperand(2).getImm();
  Register OrigSwapVal = MI.getOperand(4).getReg();
  Register BitShift = MI.getOperand(5).getReg();
  Register NegBitShift = MI.getOperand(6).getReg();
  int64_t BitSize = MI.getOperand(7).getImm();
  unsigned LOpcode = TII->getOpcodeForOffset(SystemZ::L, Disp);
  unsigned CSOpcode = TII->getOpcodeForOffset(SystemZ::CS, Disp);
  unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
  assert(LOpcode && CSOpcode && "Displacement out of range");
  Register OrigOldVal = MRI.createVirtualRegister(RC);
  Register StoreVal = MRI.createVirtualRegister(RC);
  Register OldValRot = MRI.createVirtualRegister(RC);
  Register RetryOldVal = MRI.createVirtualRegister(RC);
  Register RetrySwapVal = MRI.createVirtualRegister(RC);
  if (!MI.registerDefIsDead(SystemZ::CC, nullptr))
  MI.eraseFromParent();
  Register Tmp1 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Tmp2 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  MI.eraseFromParent();

                                                  bool ClearEven) const {
  Register In128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register NewIn128 = MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
  Register Zero64 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
  MI.eraseFromParent();
                                                       unsigned Opcode,
                                                       bool IsMemset) const {
  uint64_t DestDisp = MI.getOperand(1).getImm();
    if (!isUInt<12>(Disp)) {
      Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
      unsigned Opcode = TII->getOpcodeForOffset(SystemZ::LA, Disp);
    SrcDisp = MI.getOperand(3).getImm();
    SrcDisp = DestDisp++;
    foldDisplIfNeeded(DestBase, DestDisp);
  bool IsImmForm = LengthMO.isImm();
  bool IsRegForm = !IsImmForm;
                           unsigned Length) -> void {
  bool NeedsLoop = false;
  Register LenAdjReg = SystemZ::NoRegister;
    ImmLength = LengthMO.getImm();
    ImmLength += IsMemset ? 2 : 1;
    if (ImmLength == 0) {
      MI.eraseFromParent();
    if (Opcode == SystemZ::CLC) {
      if (ImmLength > 3 * 256)
    } else if (ImmLength > 6 * 256)
    LenAdjReg = LengthMO.getReg();
      (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
        MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
      TII->loadImmediate(*MBB, MI, StartCountReg, ImmLength / 256);
    Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  if (DestBase.isReg() && DestBase.getReg() == SystemZ::NoRegister)
    DestBase = loadZeroAddress();
  if (SrcBase.isReg() && SrcBase.getReg() == SystemZ::NoRegister)
    SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
        (HaveSingleBase ? StartSrcReg : forceReg(MI, DestBase, TII));
    Register ThisSrcReg = MRI.createVirtualRegister(RC);
        (HaveSingleBase ? ThisSrcReg : MRI.createVirtualRegister(RC));
    Register NextSrcReg = MRI.createVirtualRegister(RC);
        (HaveSingleBase ? NextSrcReg : MRI.createVirtualRegister(RC));
    RC = &SystemZ::GR64BitRegClass;
    Register ThisCountReg = MRI.createVirtualRegister(RC);
    Register NextCountReg = MRI.createVirtualRegister(RC);
      MBB = MemsetOneCheckMBB;
    if (EndMBB && !ImmLength)
    if (!HaveSingleBase)
    if (Opcode == SystemZ::MVC)
    if (!HaveSingleBase)
    Register RemSrcReg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    Register RemDestReg = HaveSingleBase
                              ? RemSrcReg
                              : MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    if (!HaveSingleBase)
    if (Opcode != SystemZ::MVC) {
  while (ImmLength > 0) {
    foldDisplIfNeeded(DestBase, DestDisp);
    foldDisplIfNeeded(SrcBase, SrcDisp);
    insertMemMemOp(MBB, MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
  if (EndMBB && ImmLength > 0) {
  MI.eraseFromParent();
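// Editor's sketch, not part of the original file: the constant-length loop above
// splits a block operation into chunks, since one MVC-style instruction covers
// at most 256 bytes. Everything except the 256-byte chunking is illustrative.
#include <cstdint>
static void emitInChunks(uint64_t DestDisp, uint64_t SrcDisp, uint64_t ImmLength,
                         void (*EmitMemMemOp)(uint64_t, uint64_t, uint64_t)) {
  while (ImmLength > 0) {
    uint64_t ThisLength = ImmLength < 256 ? ImmLength : 256;
    EmitMemMemOp(DestDisp, SrcDisp, ThisLength);
    DestDisp += ThisLength;
    SrcDisp += ThisLength;
    ImmLength -= ThisLength;
  }
}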
  uint64_t End1Reg = MI.getOperand(0).getReg();
  uint64_t Start1Reg = MI.getOperand(1).getReg();
  uint64_t Start2Reg = MI.getOperand(2).getReg();
  uint64_t CharReg = MI.getOperand(3).getReg();
  uint64_t This1Reg = MRI.createVirtualRegister(RC);
  uint64_t This2Reg = MRI.createVirtualRegister(RC);
  MI.eraseFromParent();

                                                           bool NoFloat) const {
  MI.setDesc(TII->get(Opcode));
  uint64_t Control = MI.getOperand(2).getImm();
  static const unsigned GPRControlBit[16] = {
    0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
    0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
  Control |= GPRControlBit[15];
  Control |= GPRControlBit[11];
  MI.getOperand(2).setImm(Control);
  for (int I = 0; I < 16; I++) {
    if ((Control & GPRControlBit[I]) == 0) {
  if (!NoFloat && (Control & 4) != 0) {
    if (Subtarget.hasVector()) {
  MI.eraseFromParent();

  Register SizeReg = MI.getOperand(2).getReg();
  Register PHIReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  Register IncReg = MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
  MI.eraseFromParent();
SDValue SystemZTargetLowering::

  switch (MI.getOpcode()) {
  case SystemZ::ADJCALLSTACKDOWN:
  case SystemZ::ADJCALLSTACKUP:
    return emitAdjCallStack(MI, MBB);

  case SystemZ::Select32:
  case SystemZ::Select64:
  case SystemZ::Select128:
  case SystemZ::SelectF32:
  case SystemZ::SelectF64:
  case SystemZ::SelectF128:
  case SystemZ::SelectVR32:
  case SystemZ::SelectVR64:
  case SystemZ::SelectVR128:
    return emitSelect(MI, MBB);

  case SystemZ::CondStore8Mux:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, false);
  case SystemZ::CondStore8MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STCMux, 0, true);
  case SystemZ::CondStore16Mux:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, false);
  case SystemZ::CondStore16MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STHMux, 0, true);
  case SystemZ::CondStore32Mux:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, false);
  case SystemZ::CondStore32MuxInv:
    return emitCondStore(MI, MBB, SystemZ::STMux, SystemZ::STOCMux, true);
  case SystemZ::CondStore8:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, false);
  case SystemZ::CondStore8Inv:
    return emitCondStore(MI, MBB, SystemZ::STC, 0, true);
  case SystemZ::CondStore16:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, false);
  case SystemZ::CondStore16Inv:
    return emitCondStore(MI, MBB, SystemZ::STH, 0, true);
  case SystemZ::CondStore32:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, false);
  case SystemZ::CondStore32Inv:
    return emitCondStore(MI, MBB, SystemZ::ST, SystemZ::STOC, true);
  case SystemZ::CondStore64:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, false);
  case SystemZ::CondStore64Inv:
    return emitCondStore(MI, MBB, SystemZ::STG, SystemZ::STOCG, true);
  case SystemZ::CondStoreF32:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, false);
  case SystemZ::CondStoreF32Inv:
    return emitCondStore(MI, MBB, SystemZ::STE, 0, true);
  case SystemZ::CondStoreF64:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, false);
  case SystemZ::CondStoreF64Inv:
    return emitCondStore(MI, MBB, SystemZ::STD, 0, true);

  case SystemZ::SCmp128Hi:
    return emitICmp128Hi(MI, MBB, false);
  case SystemZ::UCmp128Hi:
    return emitICmp128Hi(MI, MBB, true);

  case SystemZ::PAIR128:
    return emitPair128(MI, MBB);
  case SystemZ::AEXT128:
    return emitExt128(MI, MBB, false);
  case SystemZ::ZEXT128:
    return emitExt128(MI, MBB, true);

  case SystemZ::ATOMIC_SWAPW:
    return emitAtomicLoadBinary(MI, MBB, 0);
  case SystemZ::ATOMIC_LOADW_AR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AR);
  case SystemZ::ATOMIC_LOADW_AFI:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::AFI);
  case SystemZ::ATOMIC_LOADW_SR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::SR);
  case SystemZ::ATOMIC_LOADW_NR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR);
  case SystemZ::ATOMIC_LOADW_NILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH);
  case SystemZ::ATOMIC_LOADW_OR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OR);
  case SystemZ::ATOMIC_LOADW_OILH:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::OILH);
  case SystemZ::ATOMIC_LOADW_XR:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XR);
  case SystemZ::ATOMIC_LOADW_XILF:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::XILF);
  case SystemZ::ATOMIC_LOADW_NRi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NR, true);
  case SystemZ::ATOMIC_LOADW_NILHi:
    return emitAtomicLoadBinary(MI, MBB, SystemZ::NILH, true);

  case SystemZ::ATOMIC_LOADW_MIN:
  case SystemZ::ATOMIC_LOADW_MAX:
  case SystemZ::ATOMIC_LOADW_UMIN:
  case SystemZ::ATOMIC_LOADW_UMAX:

  case SystemZ::ATOMIC_CMP_SWAPW:
    return emitAtomicCmpSwapW(MI, MBB);
  case SystemZ::MVCImm:
  case SystemZ::MVCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC);
  case SystemZ::NCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::NC);
  case SystemZ::OCImm:
    return emitMemMemWrapper(MI, MBB, SystemZ::OC);
  case SystemZ::XCImm:
  case SystemZ::XCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::XC);
  case SystemZ::CLCImm:
  case SystemZ::CLCReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::CLC);
  case SystemZ::MemsetImmImm:
  case SystemZ::MemsetImmReg:
  case SystemZ::MemsetRegImm:
  case SystemZ::MemsetRegReg:
    return emitMemMemWrapper(MI, MBB, SystemZ::MVC, true);
  case SystemZ::CLSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::CLST);
  case SystemZ::MVSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::MVST);
  case SystemZ::SRSTLoop:
    return emitStringWrapper(MI, MBB, SystemZ::SRST);
  case SystemZ::TBEGIN:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, false);
  case SystemZ::TBEGIN_nofloat:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGIN, true);
  case SystemZ::TBEGINC:
    return emitTransactionBegin(MI, MBB, SystemZ::TBEGINC, true);
  case SystemZ::LTEBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTEBR);
  case SystemZ::LTDBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTDBR);
  case SystemZ::LTXBRCompare_Pseudo:
    return emitLoadAndTestCmp0(MI, MBB, SystemZ::LTXBR);

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:

SystemZTargetLowering::getRepRegClassFor(MVT VT) const {
  if (VT == MVT::Untyped)
    return &SystemZ::ADDR128BitRegClass;
  DAG.getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);

  EVT VT = Op.getValueType();
  Op = Op.getOperand(0);
  EVT OpVT = Op.getValueType();
  assert(OpVT.isVector() && "Operand type for VECREDUCE_ADD is not a vector.");
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
Function Alias Analysis Results
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static bool isUndef(ArrayRef< int > Mask)
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
static bool isSelectPseudo(MachineInstr &MI)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool isI128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static bool isF128MovedToParts(LoadSDNode *LD, SDNode *&LoPart, SDNode *&HiPart)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isI128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue expandBitCastI128ToF128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static SDValue expandBitCastF128ToI128(SelectionDAG &DAG, SDValue Src, const SDLoc &SL)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static bool isF128MovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
static APInt getBitsSet(unsigned numBits, unsigned loBit, unsigned hiBit)
Get a value with a block of bits set.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
StringRef getValueAsString() const
Return the attribute's value as a string.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
unsigned getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
void setMaxCallFrameSize(unsigned S)
MachineFunctionProperties & reset(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
bool isMachineOpcode() const
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
const APInt & getConstantOperandAPInt(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getMachineOpcode() const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getReturnFunctionAddressRegister()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be cast by the IR-level AtomicExpand pass.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is a legal add immediate, that is, the target has add instruction...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override
Returns how the given (atomic) store should be cast by the IR-level AtomicExpand pass.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling-convention-specific registers, particular to z/OS when in 64-bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
void setAtomicLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Let target indicate that an extending atomic load of the specified type is legal.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
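The legalization hooks above are normally invoked from a target's TargetLowering constructor. The following is a minimal, hypothetical sketch: the class name, register class, per-operation choices, and the 64-bit atomic limit are assumptions made for illustration, not a description of the SystemZ settings.

// Illustrative fragment from a hypothetical MyTargetLowering constructor.
MyTargetLowering::MyTargetLowering(const TargetMachine &TM,
                                   const MyTargetSubtarget &STI)
    : TargetLowering(TM) {
  // Make i64 a legal type by associating it with a register class.
  addRegisterClass(MVT::i64, &MyTarget::GPR64RegClass);

  // Booleans are materialized as 0/1 in scalar registers and as 0/-1
  // lanes in vector registers.
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Pretend the target has no native population-count instruction:
  // ask the legalizer to expand ISD::CTPOP into shifts and masks.
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // Sign-extending i32 loads are fine; i1 extending loads must be promoted.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Legal);
  setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i1, Promote);

  // Truncating f64 stores to f32 memory are not supported directly.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Atomics wider than 64 bits go through __atomic_* library calls.
  setMaxAtomicSizeInBitsSupported(64);

  // Derive per-register-class properties once all classes are added.
  computeRegisterProperties(STI.getRegisterInfo());
}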
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
constexpr ScalarTy getFixedValue() const
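The Type and Value queries listed above compose into the kind of small predicate that lowering code uses to classify IR operands. A self-contained sketch follows; the function name and the 64-bit threshold are arbitrary choices for the example, not rules taken from the lowering code.

#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Return true if V is a scalar integer or floating-point value of at most
// 64 bits that is used exactly once.
static bool isSmallScalarWithSingleUse(const Value *V) {
  Type *Ty = V->getType();
  if (Ty->isVectorTy())
    return false;
  if (!Ty->isIntegerTy() && !Ty->isFloatingPointTy())
    return false;
  return Ty->getPrimitiveSizeInBits().getFixedValue() <= 64 && V->hasOneUse();
}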
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
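A small sketch of the two condition-code helpers above; the header paths are what a recent LLVM tree is assumed to use.

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"

using namespace llvm;

// For an integer comparison, !(X < Y) is (X >= Y), and (Y > X) is (X < Y)
// with its operands swapped.
static void demoCondCodeHelpers(EVT VT) {
  ISD::CondCode LT = ISD::SETLT;
  ISD::CondCode Inverse = ISD::getSetCCInverse(LT, VT);      // SETGE
  ISD::CondCode Swapped = ISD::getSetCCSwappedOperands(LT);  // SETGT
  (void)Inverse;
  (void)Swapped;
}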
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert ‘Bytes’ to a hex string and output to ‘OS’.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most, stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least, stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
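The bit-manipulation helpers above (together with Log2_32_Ceil listed earlier) have straightforward numeric behavior. A small sketch, assuming the usual llvm/ADT/bit.h and llvm/Support/MathExtras.h headers:

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

using namespace llvm;

int main() {
  assert(bit_ceil(33u) == 64u);      // smallest power of two >= 33
  assert(countr_zero(0x50u) == 4);   // 0b01010000 has four trailing zeros
  assert(countl_zero(0x50u) == 25);  // ...and 25 leading zeros in 32 bits
  assert(isPowerOf2_32(64));         // 64 is a power of two > 0
  assert(!isPowerOf2_32(0));         // zero is explicitly excluded
  assert(Log2_32_Ceil(33) == 6);     // ceil(log2(33)) = 6
  return 0;
}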
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
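SignExtend64 and bit_floor are similarly small; a sketch under the same header assumptions:

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>

using namespace llvm;

int main() {
  // Treat the low 16 bits of 0xFFFF as a signed value: the result is -1.
  assert(SignExtend64<16>(0xFFFF) == -1);
  // Within 12 bits the sign bit (bit 11) of 0x123 is clear, so it stays positive.
  assert(SignExtend64(0x123, 12) == 0x123);
  // Largest power of two not exceeding 33.
  assert(bit_floor(33u) == 32u);
  return 0;
}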
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
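The EVT queries above can be exercised directly against an LLVMContext. A self-contained sketch follows; the particular types chosen are arbitrary.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

using namespace llvm;

int main() {
  LLVMContext Ctx;

  // A non-simple (extended) integer type: 40 bits has no MVT equivalent.
  EVT OddInt = EVT::getIntegerVT(Ctx, 40);
  assert(OddInt.isInteger() && !OddInt.isVector() && !OddInt.isSimple());

  // A simple 4 x f32 vector.
  EVT V4F32 = EVT::getVectorVT(Ctx, MVT::f32, 4);
  assert(V4F32.isVector() && V4F32.isFloatingPoint());
  assert(V4F32.getVectorNumElements() == 4);
  assert(V4F32.getVectorElementType() == MVT::f32);
  assert(V4F32.getFixedSizeInBits() == 128 && V4F32.isRound());

  // Switching the element type to integer yields 4 x i32.
  EVT V4I32 = V4F32.changeVectorElementTypeToInteger();
  assert(V4I32.getScalarType() == MVT::i32);
  return 0;
}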
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
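A small sketch of the KnownBits queries listed above, assuming llvm/Support/KnownBits.h:

#include "llvm/Support/KnownBits.h"
#include <cassert>

using namespace llvm;

int main() {
  // An 8-bit value known to be 0b0000?1?0: bits 7..4 and bit 1 are zero,
  // bit 2 is one, bits 3 and 0 are unknown.
  KnownBits Known(8);
  Known.Zero = APInt(8, 0b11110010);
  Known.One  = APInt(8, 0b00000100);

  assert(Known.getBitWidth() == 8);
  assert(Known.getMaxValue() == APInt(8, 0b00001101)); // unknown bits assumed 1

  // Zero-extension makes the new high bits known zero, so the maximal value
  // is unchanged; anyextOrTrunc picks the right operation for the new width.
  KnownBits Z = Known.zext(16);
  assert(Z.getBitWidth() == 16 && Z.getMaxValue() == APInt(16, 0b00001101));
  assert(Known.anyextOrTrunc(4).getBitWidth() == 4);

  // Intersecting with a second fact keeps only what both agree on.
  KnownBits Other(8);
  Other.Zero = APInt(8, 0b10000000);
  KnownBits Common = Known.intersectWith(Other);
  assert(Common.Zero == APInt(8, 0b10000000));

  // Start over with nothing known.
  Known.resetAll();
  assert(Known.Zero.isZero() && Known.One.isZero());
  return 0;
}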
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
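The three MachinePointerInfo factories above are typically used when a target builds loads and stores during lowering. A hypothetical fragment, where MF is a MachineFunction and FrameIdx is a frame index created elsewhere (both names are placeholders for this illustration):

MachinePointerInfo CPInfo   = MachinePointerInfo::getConstantPool(MF);
MachinePointerInfo GOTInfo  = MachinePointerInfo::getGOT(MF);
MachinePointerInfo SlotInfo =
    MachinePointerInfo::getFixedStack(MF, FrameIdx, /*Offset=*/8);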
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})
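The CallLoweringInfo setters above form a fluent builder that is handed to LowerCallTo (listed earlier). A hedged fragment from a hypothetical lowering routine inside a TargetLowering subclass, where DAG, Chain, Callee, RetTy, Args, and DL are assumed to be in scope:

// Hypothetical fragment: build and emit a C-calling-convention call during
// SelectionDAG lowering. The capitalized names are placeholders assumed to
// exist in the surrounding lowering routine.
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(DL)
    .setChain(Chain)
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
    .setSExtResult(true)       // result is sign-extended to register width
    .setDiscardResult(false);  // keep the returned value

// LowerCallTo returns the call result and the updated chain.
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
SDValue Result = CallResult.first;
SDValue OutChain = CallResult.second;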