24#include "llvm/IR/IntrinsicsS390.h"
32#define DEBUG_TYPE "systemz-lower"
38 : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
39 Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
89 if (Subtarget.hasHighWord())
95 if (Subtarget.hasVector()) {
102 if (Subtarget.hasVectorEnhancements1())
107 if (Subtarget.hasVector()) {
116 if (Subtarget.hasVector())
143 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
144 I <= MVT::LAST_FP_VALUETYPE;
170 for (
unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
171 I <= MVT::LAST_INTEGER_VALUETYPE;
207 if (Subtarget.hasPopulationCount())
226 if (!Subtarget.hasFPExtension())
232 if (Subtarget.hasFPExtension())
237 if (Subtarget.hasFPExtension())
314 if (!Subtarget.hasFPExtension()) {
327 if (Subtarget.hasMiscellaneousExtensions3()) {
420 if (VT != MVT::v2i64)
426 if (Subtarget.hasVectorEnhancements1())
453 if (Subtarget.hasVector()) {
475 if (Subtarget.hasVectorEnhancements2()) {
496 for (
unsigned I = MVT::FIRST_FP_VALUETYPE;
497 I <= MVT::LAST_FP_VALUETYPE;
505 if (Subtarget.hasFPExtension()) {
533 if (Subtarget.hasFPExtension()) {
544 if (Subtarget.hasVector()) {
590 if (Subtarget.hasVectorEnhancements1()) {
597 if (Subtarget.hasVectorEnhancements1()) {
651 for (
auto VT : { MVT::f32, MVT::f64, MVT::f128,
652 MVT::v4f32, MVT::v2f64 }) {
661 if (!Subtarget.hasVectorEnhancements1()) {
667 if (Subtarget.hasVectorEnhancements1())
677 if (Subtarget.hasVectorEnhancements1()) {
689 if (!Subtarget.hasVector()) {
744 struct RTLibCallMapping {
748 static RTLibCallMapping RTLibCallCommon[] = {
749#define HANDLE_LIBCALL(code, name) {RTLIB::code, name},
750#include "ZOSLibcallNames.def"
752 for (
auto &
E : RTLibCallCommon)
758 return Subtarget.hasSoftFloat();
780 return Subtarget.hasVectorEnhancements1();
793 if (!Subtarget.hasVector() ||
794 (isFP128 && !Subtarget.hasVectorEnhancements1()))
816 if (SplatBitSize > 64)
822 if (isInt<16>(SignedValue)) {
831 if (
TII->isRxSBGMask(
Value, SplatBitSize, Start,
End)) {
853 uint64_t Lower = SplatUndefZ & maskTrailingOnes<uint64_t>(LowerBits);
854 uint64_t Upper = SplatUndefZ & maskLeadingOnes<uint64_t>(UpperBits);
861 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
862 return tryValue(SplatBitsZ | Middle);
877 unsigned HalfSize = Width / 2;
882 if (HighValue != LowValue || 8 > HalfSize)
885 SplatBits = HighValue;
889 SplatBitSize = Width;
897 BVN->
isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
901 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
906 bool ForCodeSize)
const {
908 if (Imm.isZero() || Imm.isNegZero())
930 if (Subtarget.hasInterlockedAccess1() &&
944 return isInt<32>(Imm) || isUInt<32>(Imm);
949 return isUInt<32>(Imm) || isUInt<32>(-Imm);
971 LongDisplacement(LongDispl), IndexReg(IdxReg) {}
994 switch (II->getIntrinsicID()) {
996 case Intrinsic::memset:
997 case Intrinsic::memmove:
998 case Intrinsic::memcpy:
1003 if (isa<LoadInst>(
I) &&
I->hasOneUse()) {
1004 auto *SingleUser = cast<Instruction>(*
I->user_begin());
1005 if (SingleUser->getParent() ==
I->getParent()) {
1006 if (isa<ICmpInst>(SingleUser)) {
1007 if (
auto *
C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
1008 if (
C->getBitWidth() <= 64 &&
1009 (isInt<16>(
C->getSExtValue()) || isUInt<16>(
C->getZExtValue())))
1012 }
else if (isa<StoreInst>(SingleUser))
1016 }
else if (
auto *StoreI = dyn_cast<StoreInst>(
I)) {
1017 if (
auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
1018 if (LoadI->hasOneUse() && LoadI->getParent() ==
I->getParent())
1023 if (HasVector && (isa<LoadInst>(
I) || isa<StoreInst>(
I))) {
1031 Type *MemAccessTy = (isa<LoadInst>(
I) ?
I->getType() :
1032 I->getOperand(0)->getType());
1034 bool IsVectorAccess = MemAccessTy->
isVectorTy();
1038 if (!IsVectorAccess && isa<StoreInst>(
I)) {
1039 Value *DataOp =
I->getOperand(0);
1040 if (isa<ExtractElementInst>(DataOp))
1041 IsVectorAccess =
true;
1046 if (!IsVectorAccess && isa<LoadInst>(
I) &&
I->hasOneUse()) {
1047 User *LoadUser = *
I->user_begin();
1048 if (isa<InsertElementInst>(LoadUser))
1049 IsVectorAccess =
true;
1052 if (IsFPAccess || IsVectorAccess)
1081 return AM.
Scale == 0;
1088 std::vector<EVT> &MemOps,
unsigned Limit,
const MemOp &
Op,
unsigned DstAS,
1089 unsigned SrcAS,
const AttributeList &FuncAttributes)
const {
1090 const int MVCFastLen = 16;
1092 if (Limit != ~
unsigned(0)) {
1094 if (
Op.isMemcpy() &&
Op.allowOverlap() &&
Op.size() <= MVCFastLen)
1096 if (
Op.isMemset() &&
Op.size() - 1 <= MVCFastLen)
1098 if (
Op.isZeroMemset())
1103 SrcAS, FuncAttributes);
1108 return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
1112 if (!FromType->isIntegerTy() || !ToType->
isIntegerTy())
1114 unsigned FromBits = FromType->getPrimitiveSizeInBits().getFixedValue();
1116 return FromBits > ToBits;
1124 return FromBits > ToBits;
1133 if (Constraint.
size() == 1) {
1134 switch (Constraint[0]) {
1160 }
else if (Constraint.
size() == 2 && Constraint[0] ==
'Z') {
1161 switch (Constraint[1]) {
1177 const char *constraint)
const {
1179 Value *CallOperandVal =
info.CallOperandVal;
1182 if (!CallOperandVal)
1186 switch (*constraint) {
1204 if (Subtarget.hasVector())
1210 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1211 if (isUInt<8>(
C->getZExtValue()))
1216 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1217 if (isUInt<12>(
C->getZExtValue()))
1222 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1223 if (isInt<16>(
C->getSExtValue()))
1228 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1229 if (isInt<20>(
C->getSExtValue()))
1234 if (
auto *
C = dyn_cast<ConstantInt>(CallOperandVal))
1235 if (
C->getZExtValue() == 0x7fffffff)
1245static std::pair<unsigned, const TargetRegisterClass *>
1247 const unsigned *Map,
unsigned Size) {
1248 assert(*(Constraint.
end()-1) ==
'}' &&
"Missing '}'");
1249 if (isdigit(Constraint[2])) {
1254 return std::make_pair(Map[
Index], RC);
1256 return std::make_pair(0U,
nullptr);
1259std::pair<unsigned, const TargetRegisterClass *>
1262 if (Constraint.
size() == 1) {
1264 switch (Constraint[0]) {
1269 return std::make_pair(0U, &SystemZ::GR64BitRegClass);
1271 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
1272 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
1276 return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
1277 else if (VT == MVT::i128)
1278 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
1279 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
1282 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
1287 return std::make_pair(0U, &SystemZ::FP64BitRegClass);
1289 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
1290 return std::make_pair(0U, &SystemZ::FP32BitRegClass);
1295 if (Subtarget.hasVector()) {
1297 return std::make_pair(0U, &SystemZ::VR32BitRegClass);
1299 return std::make_pair(0U, &SystemZ::VR64BitRegClass);
1300 return std::make_pair(0U, &SystemZ::VR128BitRegClass);
1309 auto getVTSizeInBits = [&VT]() {
1317 if (Constraint[1] ==
'r') {
1318 if (getVTSizeInBits() == 32)
1321 if (getVTSizeInBits() == 128)
1327 if (Constraint[1] ==
'f') {
1329 return std::make_pair(
1331 if (getVTSizeInBits() == 32)
1334 if (getVTSizeInBits() == 128)
1340 if (Constraint[1] ==
'v') {
1341 if (!Subtarget.hasVector())
1342 return std::make_pair(
1344 if (getVTSizeInBits() == 32)
1347 if (getVTSizeInBits() == 64)
1374 const Constant *PersonalityFn)
const {
1379 const Constant *PersonalityFn)
const {
1387 if (Constraint.
size() == 1) {
1388 switch (Constraint[0]) {
1390 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1391 if (isUInt<8>(
C->getZExtValue()))
1393 Op.getValueType()));
1397 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1398 if (isUInt<12>(
C->getZExtValue()))
1400 Op.getValueType()));
1404 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1405 if (isInt<16>(
C->getSExtValue()))
1407 Op.getValueType()));
1411 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1412 if (isInt<20>(
C->getSExtValue()))
1414 Op.getValueType()));
1418 if (
auto *
C = dyn_cast<ConstantSDNode>(
Op))
1419 if (
C->getZExtValue() == 0x7fffffff)
1421 Op.getValueType()));
1432#include "SystemZGenCallingConv.inc"
1436 static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1442 Type *ToType)
const {
1505 if (BitCastToType == MVT::v2i64)
1530 MVT::Untyped,
Hi,
Lo);
1554 unsigned NumParts,
MVT PartVT, std::optional<CallingConv::ID>
CC)
const {
1556 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1567 MVT PartVT,
EVT ValueVT, std::optional<CallingConv::ID>
CC)
const {
1568 if (ValueVT.
getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) {
1595 unsigned NumFixedGPRs = 0;
1596 unsigned NumFixedFPRs = 0;
1597 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
1610 RC = &SystemZ::GR32BitRegClass;
1614 RC = &SystemZ::GR64BitRegClass;
1618 RC = &SystemZ::FP32BitRegClass;
1622 RC = &SystemZ::FP64BitRegClass;
1626 RC = &SystemZ::FP128BitRegClass;
1634 RC = &SystemZ::VR128BitRegClass;
1663 ArgValue = DAG.
getLoad(LocVT,
DL, Chain, FIN,
1674 unsigned ArgIndex = Ins[
I].OrigArgIndex;
1675 assert (Ins[
I].PartOffset == 0);
1676 while (
I + 1 !=
E && Ins[
I + 1].OrigArgIndex == ArgIndex) {
1678 unsigned PartOffset = Ins[
I + 1].PartOffset;
1701 int64_t VarArgOffset = CCInfo.
getStackSize() + Regs->getCallFrameSize();
1719 int64_t RegSaveOffset =
1734 &SystemZ::FP64BitRegClass);
1752 MRI.addLiveIn(Regs->getADARegister(), ADAvReg);
1764 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
1771 if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
1773 if (Outs[
I].Flags.isSwiftSelf() || Outs[
I].Flags.isSwiftError())
1780 unsigned Offset,
bool LoadAdr =
false) {
1803 bool LoadAddr =
false;
1804 const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV);
1825 unsigned ADADelta = 0;
1826 unsigned EPADelta = 8;
1831 if (
auto *
G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1832 bool IsInternal = (
G->getGlobal()->hasInternalLinkage() ||
1833 G->getGlobal()->hasPrivateLinkage());
1848 }
else if (
auto *
E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1910 for (
unsigned I = 0,
E = ArgLocs.
size();
I !=
E; ++
I) {
1916 unsigned ArgIndex = Outs[
I].OrigArgIndex;
1918 if (
I + 1 !=
E && Outs[
I + 1].OrigArgIndex == ArgIndex) {
1920 Type *OrigArgType = CLI.
Args[Outs[
I].OrigArgIndex].Ty;
1926 SlotVT = Outs[
I].VT;
1929 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1935 assert (Outs[
I].PartOffset == 0);
1936 while (
I + 1 !=
E && Outs[
I + 1].OrigArgIndex == ArgIndex) {
1937 SDValue PartValue = OutVals[
I + 1];
1938 unsigned PartOffset = Outs[
I + 1].PartOffset;
1945 SlotVT.
getStoreSize()) &&
"Not enough space for argument part!");
1948 ArgValue = SpillSlot;
1965 if (!StackPtr.getNode())
1987 RegsToPass.
push_back(std::make_pair(SystemZ::R3D, ShadowArgValue));
1993 if (!MemOpChains.
empty())
2006 ->getAddressOfCalleeRegister();
2009 Callee = DAG.
getRegister(CalleeReg, Callee.getValueType());
2014 if (
auto *
G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2017 }
else if (
auto *
E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2020 }
else if (IsTailCall) {
2023 Callee = DAG.
getRegister(SystemZ::R1D, Callee.getValueType());
2028 for (
unsigned I = 0,
E = RegsToPass.
size();
I !=
E; ++
I) {
2030 RegsToPass[
I].second, Glue);
2041 for (
unsigned I = 0,
E = RegsToPass.
size();
I !=
E; ++
I)
2043 RegsToPass[
I].second.getValueType()));
2047 const uint32_t *Mask =
TRI->getCallPreservedMask(MF, CallConv);
2048 assert(Mask &&
"Missing call preserved mask for calling convention");
2072 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
2076 for (
unsigned I = 0,
E = RetLocs.
size();
I !=
E; ++
I) {
2098 bool DoesNotReturn,
bool IsReturnValueUsed)
const {
2100 Args.reserve(Ops.
size());
2105 Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.
getContext());
2108 Args.push_back(Entry);
2134 for (
auto &Out : Outs)
2135 if (Out.ArgVT == MVT::i128)
2140 return RetCCInfo.
CheckReturn(Outs, RetCC_SystemZ);
2157 if (RetLocs.
empty())
2167 for (
unsigned I = 0,
E = RetLocs.
size();
I !=
E; ++
I) {
2196 unsigned &CCValid) {
2197 unsigned Id =
Op.getConstantOperandVal(1);
2199 case Intrinsic::s390_tbegin:
2204 case Intrinsic::s390_tbegin_nofloat:
2209 case Intrinsic::s390_tend:
2223 unsigned Id =
Op.getConstantOperandVal(0);
2225 case Intrinsic::s390_vpkshs:
2226 case Intrinsic::s390_vpksfs:
2227 case Intrinsic::s390_vpksgs:
2232 case Intrinsic::s390_vpklshs:
2233 case Intrinsic::s390_vpklsfs:
2234 case Intrinsic::s390_vpklsgs:
2239 case Intrinsic::s390_vceqbs:
2240 case Intrinsic::s390_vceqhs:
2241 case Intrinsic::s390_vceqfs:
2242 case Intrinsic::s390_vceqgs:
2247 case Intrinsic::s390_vchbs:
2248 case Intrinsic::s390_vchhs:
2249 case Intrinsic::s390_vchfs:
2250 case Intrinsic::s390_vchgs:
2255 case Intrinsic::s390_vchlbs:
2256 case Intrinsic::s390_vchlhs:
2257 case Intrinsic::s390_vchlfs:
2258 case Intrinsic::s390_vchlgs:
2263 case Intrinsic::s390_vtm:
2268 case Intrinsic::s390_vfaebs:
2269 case Intrinsic::s390_vfaehs:
2270 case Intrinsic::s390_vfaefs:
2275 case Intrinsic::s390_vfaezbs:
2276 case Intrinsic::s390_vfaezhs:
2277 case Intrinsic::s390_vfaezfs:
2282 case Intrinsic::s390_vfeebs:
2283 case Intrinsic::s390_vfeehs:
2284 case Intrinsic::s390_vfeefs:
2289 case Intrinsic::s390_vfeezbs:
2290 case Intrinsic::s390_vfeezhs:
2291 case Intrinsic::s390_vfeezfs:
2296 case Intrinsic::s390_vfenebs:
2297 case Intrinsic::s390_vfenehs:
2298 case Intrinsic::s390_vfenefs:
2303 case Intrinsic::s390_vfenezbs:
2304 case Intrinsic::s390_vfenezhs:
2305 case Intrinsic::s390_vfenezfs:
2310 case Intrinsic::s390_vistrbs:
2311 case Intrinsic::s390_vistrhs:
2312 case Intrinsic::s390_vistrfs:
2317 case Intrinsic::s390_vstrcbs:
2318 case Intrinsic::s390_vstrchs:
2319 case Intrinsic::s390_vstrcfs:
2324 case Intrinsic::s390_vstrczbs:
2325 case Intrinsic::s390_vstrczhs:
2326 case Intrinsic::s390_vstrczfs:
2331 case Intrinsic::s390_vstrsb:
2332 case Intrinsic::s390_vstrsh:
2333 case Intrinsic::s390_vstrsf:
2338 case Intrinsic::s390_vstrszb:
2339 case Intrinsic::s390_vstrszh:
2340 case Intrinsic::s390_vstrszf:
2345 case Intrinsic::s390_vfcedbs:
2346 case Intrinsic::s390_vfcesbs:
2351 case Intrinsic::s390_vfchdbs:
2352 case Intrinsic::s390_vfchsbs:
2357 case Intrinsic::s390_vfchedbs:
2358 case Intrinsic::s390_vfchesbs:
2363 case Intrinsic::s390_vftcidb:
2364 case Intrinsic::s390_vftcisb:
2369 case Intrinsic::s390_tdc:
2387 for (
unsigned I = 2;
I < NumOps; ++
I)
2390 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
2396 return Intr.getNode();
2406 for (
unsigned I = 1;
I < NumOps; ++
I)
2410 return Intr.getNode();
2420 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
2421 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
2422 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
2447 auto *ConstOp1 = dyn_cast<ConstantSDNode>(
C.Op1.getNode());
2448 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2451 int64_t
Value = ConstOp1->getSExtValue();
2467 if (!
C.Op0.hasOneUse() ||
2473 auto *Load = cast<LoadSDNode>(
C.Op0);
2474 unsigned NumBits = Load->getMemoryVT().getSizeInBits();
2475 if ((NumBits != 8 && NumBits != 16) ||
2476 NumBits != Load->getMemoryVT().getStoreSizeInBits())
2481 auto *ConstOp1 = cast<ConstantSDNode>(
C.Op1);
2482 if (!ConstOp1 || ConstOp1->getValueSizeInBits(0) > 64)
2485 uint64_t Mask = (1 << NumBits) - 1;
2488 int64_t SignedValue = ConstOp1->getSExtValue();
2495 }
else if (NumBits == 8) {
2521 if (
C.Op0.getValueType() != MVT::i32 ||
2522 Load->getExtensionType() != ExtType) {
2524 Load->getBasePtr(), Load->getPointerInfo(),
2525 Load->getMemoryVT(), Load->getAlign(),
2526 Load->getMemOperand()->getFlags());
2532 if (
C.Op1.getValueType() != MVT::i32 ||
2533 Value != ConstOp1->getZExtValue())
2540 auto *Load = dyn_cast<LoadSDNode>(
Op.getNode());
2543 if (Load->getMemoryVT() == MVT::i8)
2546 switch (Load->getExtensionType()) {
2563 if (
C.Op0.getValueType() == MVT::i128)
2565 if (
C.Op0.getValueType() == MVT::f128)
2571 if (isa<ConstantFPSDNode>(
C.Op1))
2576 auto *ConstOp1 = dyn_cast<ConstantSDNode>(
C.Op1);
2577 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
2595 isUInt<16>(ConstOp1->getZExtValue()))
2600 isInt<16>(ConstOp1->getSExtValue()))
2606 unsigned Opcode0 =
C.Op0.getOpcode();
2613 C.Op0.getConstantOperandVal(1) == 0xffffffff)
2628 ((
N->getOperand(0) ==
C.Op0 &&
N->getOperand(1) ==
C.Op1) ||
2629 (
N->getOperand(0) ==
C.Op1 &&
N->getOperand(1) ==
C.Op0))) {
2633 Flags.setNoSignedWrap(
false);
2634 Flags.setNoUnsignedWrap(
false);
2653 auto *C1 = dyn_cast<ConstantFPSDNode>(
C.Op1);
2654 if (C1 && C1->isZero()) {
2673 if (
C.Op0.getOpcode() ==
ISD::SHL &&
C.Op0.getValueType() == MVT::i64 &&
2675 auto *C1 = dyn_cast<ConstantSDNode>(
C.Op0.getOperand(1));
2676 if (C1 && C1->getZExtValue() == 32) {
2677 SDValue ShlOp0 =
C.Op0.getOperand(0);
2681 cast<VTSDNode>(
N->getOperand(1))->getVT() == MVT::i32) {
2696 C.Op0.getOperand(0).getOpcode() ==
ISD::LOAD &&
2698 cast<ConstantSDNode>(
C.Op1)->getValueSizeInBits(0) <= 64 &&
2699 C.Op1->getAsZExtVal() == 0) {
2700 auto *L = cast<LoadSDNode>(
C.Op0.getOperand(0));
2701 if (L->getMemoryVT().getStoreSizeInBits().getFixedValue() <=
2702 C.Op0.getValueSizeInBits().getFixedValue()) {
2703 unsigned Type = L->getExtensionType();
2706 C.Op0 =
C.Op0.getOperand(0);
2716 auto *Shift = dyn_cast<ConstantSDNode>(
N.getOperand(1));
2720 uint64_t Amount = Shift->getZExtValue();
2721 if (Amount >=
N.getValueSizeInBits())
2736 unsigned ICmpType) {
2737 assert(Mask != 0 &&
"ANDs with zero should have been removed by now");
2759 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <=
Low) {
2765 if (EffectivelyUnsigned && CmpVal <
Low) {
2773 if (CmpVal == Mask) {
2779 if (EffectivelyUnsigned && CmpVal >= Mask -
Low && CmpVal < Mask) {
2785 if (EffectivelyUnsigned && CmpVal > Mask -
Low && CmpVal <= Mask) {
2793 if (EffectivelyUnsigned && CmpVal >= Mask -
High && CmpVal <
High) {
2799 if (EffectivelyUnsigned && CmpVal > Mask -
High && CmpVal <=
High) {
2828 if (
C.Op0.getValueType() == MVT::i128) {
2833 auto *Mask = dyn_cast<ConstantSDNode>(
C.Op1);
2834 if (Mask && Mask->getAPIntValue() == 0) {
2849 auto *ConstOp1 = dyn_cast<ConstantSDNode>(
C.Op1);
2852 uint64_t CmpVal = ConstOp1->getZExtValue();
2859 NewC.Op0 =
C.Op0.getOperand(0);
2860 NewC.Op1 =
C.Op0.getOperand(1);
2861 Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2864 MaskVal = Mask->getZExtValue();
2869 if (NewC.Op0.getValueType() != MVT::i64 ||
2884 MaskVal = -(CmpVal & -CmpVal);
2892 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2893 unsigned NewCCMask, ShiftVal;
2895 NewC.Op0.getOpcode() ==
ISD::SHL &&
2897 (MaskVal >> ShiftVal != 0) &&
2898 ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
2900 MaskVal >> ShiftVal,
2903 NewC.Op0 = NewC.Op0.getOperand(0);
2904 MaskVal >>= ShiftVal;
2906 NewC.Op0.getOpcode() ==
ISD::SRL &&
2908 (MaskVal << ShiftVal != 0) &&
2909 ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
2911 MaskVal << ShiftVal,
2914 NewC.Op0 = NewC.Op0.getOperand(0);
2915 MaskVal <<= ShiftVal;
2926 if (Mask && Mask->getZExtValue() == MaskVal)
2931 C.CCMask = NewCCMask;
2939 if (
C.Op0.getValueType() != MVT::i128)
2957 bool Swap =
false, Invert =
false;
2976 C.CCMask ^=
C.CCValid;
2986 auto *Mask = dyn_cast<ConstantSDNode>(
C.Op0.getOperand(1));
2987 if (!Mask || Mask->getValueSizeInBits(0) > 64)
2990 if ((~Known.
Zero).getZExtValue() & ~Mask->getZExtValue())
2993 C.Op0 =
C.Op0.getOperand(0);
3005 C.CCValid = CCValid;
3008 C.CCMask =
CC < 4 ? 1 << (3 -
CC) : 0;
3011 C.CCMask =
CC < 4 ? ~(1 << (3 -
CC)) : -1;
3015 C.CCMask =
CC < 4 ? ~0U << (4 -
CC) : -1;
3018 C.CCMask =
CC < 4 ? ~(~0U << (4 -
CC)) : 0;
3022 C.CCMask =
CC < 4 ? ~0U << (3 -
CC) : -1;
3025 C.CCMask =
CC < 4 ? ~(~0U << (3 -
CC)) : 0;
3028 C.CCMask &= CCValid;
3036 bool IsSignaling =
false) {
3039 unsigned Opcode, CCValid;
3051 Comparison
C(CmpOp0, CmpOp1, Chain);
3053 if (
C.Op0.getValueType().isFloatingPoint()) {
3057 else if (!IsSignaling)
3079 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
3100 if (!
C.Op1.getNode()) {
3102 switch (
C.Op0.getOpcode()) {
3129 return DAG.
getNode(
C.Opcode,
DL, VTs,
C.Chain,
C.Op0,
C.Op1);
3131 return DAG.
getNode(
C.Opcode,
DL, MVT::i32,
C.Op0,
C.Op1);
3140 Op0 = DAG.
getNode(Extend,
DL, MVT::i64, Op0);
3141 Op1 = DAG.
getNode(Extend,
DL, MVT::i64, Op1);
3166 unsigned CCValid,
unsigned CCMask) {
3195 case CmpMode::Int:
return 0;
3215 case CmpMode::FP:
return 0;
3216 case CmpMode::StrictFP:
return 0;
3217 case CmpMode::SignalingFP:
return 0;
3249 int Mask[] = { Start, -1, Start + 1, -1 };
3269 !Subtarget.hasVectorEnhancements1()) {
3283 SDValue Ops[2] = { Res, NewChain };
3292 return DAG.
getNode(Opcode,
DL, VTs, Chain, CmpOp0, CmpOp1);
3294 return DAG.
getNode(Opcode,
DL, VT, CmpOp0, CmpOp1);
3307 bool IsSignaling)
const {
3310 assert (!IsSignaling || Chain);
3311 CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
3312 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
3313 bool Invert =
false;
3321 assert(IsFP &&
"Unexpected integer comparison");
3323 DL, VT, CmpOp1, CmpOp0, Chain);
3325 DL, VT, CmpOp0, CmpOp1, Chain);
3329 LT.getValue(1),
GE.getValue(1));
3338 assert(IsFP &&
"Unexpected integer comparison");
3340 DL, VT, CmpOp1, CmpOp0, Chain);
3342 DL, VT, CmpOp0, CmpOp1, Chain);
3346 LT.getValue(1),
GT.getValue(1));
3355 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp0, CmpOp1, Chain);
3359 Cmp = getVectorCmp(DAG, Opcode,
DL, VT, CmpOp1, CmpOp0, Chain);
3364 Chain =
Cmp.getValue(1);
3372 if (Chain && Chain.
getNode() !=
Cmp.getNode()) {
3385 EVT VT =
Op.getValueType();
3387 return lowerVectorSETCC(DAG,
DL, VT,
CC, CmpOp0, CmpOp1);
3389 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL));
3396 bool IsSignaling)
const {
3402 EVT VT =
Op.getNode()->getValueType(0);
3404 SDValue Res = lowerVectorSETCC(DAG,
DL, VT,
CC, CmpOp0, CmpOp1,
3405 Chain, IsSignaling);
3409 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL, Chain, IsSignaling));
3424 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL));
3461 Comparison
C(
getCmp(DAG, CmpOp0, CmpOp1,
CC,
DL));
3469 cast<ConstantSDNode>(
C.Op1)->getValueSizeInBits(0) <= 64 &&
3470 C.Op1->getAsZExtVal() == 0) {
3478 SDValue Ops[] = {TrueOp, FalseOp,
3552 Chain = DAG.
getCopyToReg(Chain,
DL, SystemZ::R2D, GOTOffset, Glue);
3559 Node->getValueType(0),
3571 assert(Mask &&
"Missing call preserved mask for calling convention");
3579 Chain = DAG.
getNode(Opcode,
DL, NodeTys, Ops);
3586SDValue SystemZTargetLowering::lowerThreadPointer(
const SDLoc &
DL,
3618 SDValue TP = lowerThreadPointer(
DL, DAG);
3726 if (
CP->isMachineConstantPoolEntry())
3745 unsigned Depth =
Op.getConstantOperandVal(0);
3752 int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
3781 unsigned Depth =
Op.getConstantOperandVal(0);
3789 SDValue FrameAddr = lowerFRAMEADDR(
Op, DAG);
3791 int Offset = (TFL->usePackedStack(MF) ? -2 : 14) *
3808 EVT InVT =
In.getValueType();
3809 EVT ResVT =
Op.getValueType();
3814 if (
auto *LoadN = dyn_cast<LoadSDNode>(In))
3817 LoadN->getBasePtr(), LoadN->getMemOperand());
3823 if (InVT == MVT::i32 && ResVT == MVT::f32) {
3825 if (Subtarget.hasHighWord()) {
3829 MVT::i64,
SDValue(U64, 0), In);
3837 DL, MVT::f32, Out64);
3839 if (InVT == MVT::f32 && ResVT == MVT::i32) {
3842 MVT::f64,
SDValue(U64, 0), In);
3844 if (Subtarget.hasHighWord())
3858 return lowerVASTART_XPLINK(
Op, DAG);
3860 return lowerVASTART_ELF(
Op, DAG);
3875 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
3889 const Value *SV = cast<SrcValueSDNode>(
Op.getOperand(2))->getValue();
3893 const unsigned NumFields = 4;
3904 for (
unsigned I = 0;
I < NumFields; ++
I) {
3909 MemOps[
I] = DAG.
getStore(Chain,
DL, Fields[
I], FieldAddr,
3921 const Value *DstSV = cast<SrcValueSDNode>(
Op.getOperand(3))->getValue();
3922 const Value *SrcSV = cast<SrcValueSDNode>(
Op.getOperand(4))->getValue();
3928 Align(8),
false,
false,
3934SystemZTargetLowering::lowerDYNAMIC_STACKALLOC(
SDValue Op,
3937 return lowerDYNAMIC_STACKALLOC_XPLINK(
Op, DAG);
3939 return lowerDYNAMIC_STACKALLOC_ELF(
Op, DAG);
3943SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_XPLINK(
SDValue Op,
3955 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
3958 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3959 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3965 if (ExtraAlignSpace)
3969 bool IsSigned =
false;
3970 bool DoesNotReturn =
false;
3971 bool IsReturnValueUsed =
false;
3972 EVT VT =
Op.getValueType();
3983 Register SPReg = Regs.getStackPointerRegister();
3994 if (ExtraAlignSpace) {
4006SystemZTargetLowering::lowerDYNAMIC_STACKALLOC_ELF(
SDValue Op,
4020 uint64_t AlignVal = (RealignOpt ?
Align->getAsZExtVal() : 0);
4023 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
4024 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
4035 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4039 if (ExtraAlignSpace)
4047 DAG.
getVTList(MVT::i64, MVT::Other), Chain, OldSP, NeededSpace);
4063 if (RequiredAlign > StackAlign) {
4073 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4080SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
4089 EVT VT =
Op.getValueType();
4096 Op.getOperand(1), Ops[1], Ops[0]);
4097 else if (Subtarget.hasMiscellaneousExtensions2())
4102 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4126 LL, RL, Ops[1], Ops[0]);
4137 EVT VT =
Op.getValueType();
4144 Op.getOperand(1), Ops[1], Ops[0]);
4150 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4158 EVT VT =
Op.getValueType();
4178 EVT VT =
Op.getValueType();
4185 Op.getOperand(0),
Op.getOperand(1), Ops[1], Ops[0]);
4190 assert(
Op.getValueType() == MVT::i64 &&
"Should be 64-bit operation");
4193 SDValue Ops[] = {
Op.getOperand(0),
Op.getOperand(1)};
4202 if ((Masks[0] >> 32) == 0xffffffff &&
uint32_t(Masks[1]) == 0xffffffff)
4204 else if ((Masks[1] >> 32) == 0xffffffff &&
uint32_t(Masks[0]) == 0xffffffff)
4220 if (!isInt<16>(
Value))
4241 MVT::i64, HighOp, Low32);
4252 if (
N->getValueType(0) == MVT::i128) {
4253 unsigned BaseOp = 0;
4254 unsigned FlagOp = 0;
4255 switch (
Op.getOpcode()) {
4274 unsigned BaseOp = 0;
4275 unsigned CCValid = 0;
4276 unsigned CCMask = 0;
4278 switch (
Op.getOpcode()) {
4306 if (
N->getValueType(1) == MVT::i1)
4329 MVT VT =
N->getSimpleValueType(0);
4340 if (VT == MVT::i128) {
4341 unsigned BaseOp = 0;
4342 unsigned FlagOp = 0;
4343 switch (
Op.getOpcode()) {
4363 unsigned BaseOp = 0;
4364 unsigned CCValid = 0;
4365 unsigned CCMask = 0;
4367 switch (
Op.getOpcode()) {
4396 if (
N->getValueType(1) == MVT::i1)
4404 EVT VT =
Op.getValueType();
4406 Op =
Op.getOperand(0);
4454 if (NumSignificantBits == 0)
4460 BitSize = std::min(BitSize, OrigBitSize);
4469 for (int64_t
I = BitSize / 2;
I >= 8;
I =
I / 2) {
4471 if (BitSize != OrigBitSize)
4509 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4510 if (
Node->getMemoryVT() == MVT::i128) {
4517 Node->getChain(),
Node->getBasePtr(),
4518 Node->getMemoryVT(),
Node->getMemOperand());
4524 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4525 if (
Node->getMemoryVT() == MVT::i128) {
4532 Node->getBasePtr(),
Node->getMemoryVT(),
4533 Node->getMemOperand());
4538 MVT::Other, Chain), 0);
4547 EVT PtrVT =
Addr.getValueType();
4548 EVT WideVT = MVT::i32;
4571 unsigned Opcode)
const {
4572 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4575 EVT NarrowVT =
Node->getMemoryVT();
4576 EVT WideVT = MVT::i32;
4577 if (NarrowVT == WideVT)
4589 if (
auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
4594 SDValue AlignedAddr, BitShift, NegBitShift;
4612 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
4631 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4632 EVT MemVT =
Node->getMemoryVT();
4633 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
4635 assert(
Op.getValueType() == MemVT &&
"Mismatched VTs");
4636 assert(Subtarget.hasInterlockedAccess1() &&
4637 "Should have been expanded by AtomicExpand pass.");
4643 Node->getChain(),
Node->getBasePtr(), NegSrc2,
4644 Node->getMemOperand());
4653 auto *
Node = cast<AtomicSDNode>(
Op.getNode());
4661 if (
Node->getMemoryVT() == MVT::i128) {
4670 EVT NarrowVT =
Node->getMemoryVT();
4671 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
4672 if (NarrowVT == WideVT) {
4674 SDValue Ops[] = { ChainIn,
Addr, CmpVal, SwapVal };
4676 DL, Tys, Ops, NarrowVT, MMO);
4690 SDValue AlignedAddr, BitShift, NegBitShift;
4695 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
4698 VTList, Ops, NarrowVT, MMO);
4712SystemZTargetLowering::getTargetMMOFlags(
const Instruction &
I)
const {
4717 if (
auto *SI = dyn_cast<StoreInst>(&
I))
4720 if (
auto *LI = dyn_cast<LoadInst>(&
I))
4723 if (
auto *AI = dyn_cast<AtomicRMWInst>(&
I))
4726 if (
auto *AI = dyn_cast<AtomicCmpXchgInst>(&
I))
4738 "in GHC calling convention");
4740 Regs->getStackPointerRegister(),
Op.getValueType());
4751 "in GHC calling convention");
4758 if (StoreBackchain) {
4760 Chain,
DL, Regs->getStackPointerRegister(), MVT::i64);
4761 Backchain = DAG.
getLoad(MVT::i64,
DL, Chain, getBackchainAddress(OldSP, DAG),
4765 Chain = DAG.
getCopyToReg(Chain,
DL, Regs->getStackPointerRegister(), NewSP);
4768 Chain = DAG.
getStore(Chain,
DL, Backchain, getBackchainAddress(NewSP, DAG),
4776 bool IsData =
Op.getConstantOperandVal(4);
4779 return Op.getOperand(0);
4782 bool IsWrite =
Op.getConstantOperandVal(2);
4784 auto *
Node = cast<MemIntrinsicSDNode>(
Op.getNode());
4788 Node->getVTList(), Ops,
4789 Node->getMemoryVT(),
Node->getMemOperand());
4801SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(
SDValue Op,
4803 unsigned Opcode, CCValid;
4805 assert(
Op->getNumValues() == 2 &&
"Expected only CC result and chain");
4816SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(
SDValue Op,
4818 unsigned Opcode, CCValid;
4821 if (
Op->getNumValues() == 1)
4823 assert(
Op->getNumValues() == 2 &&
"Expected a CC and non-CC result");
4828 unsigned Id =
Op.getConstantOperandVal(0);
4830 case Intrinsic::thread_pointer:
4831 return lowerThreadPointer(
SDLoc(
Op), DAG);
4833 case Intrinsic::s390_vpdi:
4835 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4837 case Intrinsic::s390_vperm:
4839 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4841 case Intrinsic::s390_vuphb:
4842 case Intrinsic::s390_vuphh:
4843 case Intrinsic::s390_vuphf:
4847 case Intrinsic::s390_vuplhb:
4848 case Intrinsic::s390_vuplhh:
4849 case Intrinsic::s390_vuplhf:
4853 case Intrinsic::s390_vuplb:
4854 case Intrinsic::s390_vuplhw:
4855 case Intrinsic::s390_vuplf:
4859 case Intrinsic::s390_vupllb:
4860 case Intrinsic::s390_vupllh:
4861 case Intrinsic::s390_vupllf:
4865 case Intrinsic::s390_vsumb:
4866 case Intrinsic::s390_vsumh:
4867 case Intrinsic::s390_vsumgh:
4868 case Intrinsic::s390_vsumgf:
4869 case Intrinsic::s390_vsumqf:
4870 case Intrinsic::s390_vsumqg:
4872 Op.getOperand(1),
Op.getOperand(2));
4874 case Intrinsic::s390_vaq:
4876 Op.getOperand(1),
Op.getOperand(2));
4877 case Intrinsic::s390_vaccb:
4878 case Intrinsic::s390_vacch:
4879 case Intrinsic::s390_vaccf:
4880 case Intrinsic::s390_vaccg:
4881 case Intrinsic::s390_vaccq:
4883 Op.getOperand(1),
Op.getOperand(2));
4884 case Intrinsic::s390_vacq:
4886 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4887 case Intrinsic::s390_vacccq:
4889 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4891 case Intrinsic::s390_vsq:
4893 Op.getOperand(1),
Op.getOperand(2));
4894 case Intrinsic::s390_vscbib:
4895 case Intrinsic::s390_vscbih:
4896 case Intrinsic::s390_vscbif:
4897 case Intrinsic::s390_vscbig:
4898 case Intrinsic::s390_vscbiq:
4900 Op.getOperand(1),
Op.getOperand(2));
4901 case Intrinsic::s390_vsbiq:
4903 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4904 case Intrinsic::s390_vsbcbiq:
4906 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4927 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
4930 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
4933 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
4936 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
4939 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
4942 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
4945 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
4948 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
4951 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
4954 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
4957 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
4960 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
4963 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
4977 OpNo0 = OpNo1 = OpNos[1];
4978 }
else if (OpNos[1] < 0) {
4979 OpNo0 = OpNo1 = OpNos[0];
4997 unsigned &OpNo0,
unsigned &OpNo1) {
4998 int OpNos[] = { -1, -1 };
5011 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5013 OpNos[ModelOpNo] = RealOpNo;
5021 unsigned &OpNo0,
unsigned &OpNo1) {
5038 int Elt = Bytes[
From];
5041 Transform[
From] = -1;
5043 while (
P.Bytes[To] != Elt) {
5048 Transform[
From] = To;
5071 if (
auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
5072 Bytes.
resize(NumElements * BytesPerElement, -1);
5073 for (
unsigned I = 0;
I < NumElements; ++
I) {
5074 int Index = VSN->getMaskElt(
I);
5076 for (
unsigned J = 0; J < BytesPerElement; ++J)
5077 Bytes[
I * BytesPerElement + J] =
Index * BytesPerElement + J;
5082 isa<ConstantSDNode>(ShuffleOp.
getOperand(1))) {
5084 Bytes.
resize(NumElements * BytesPerElement, -1);
5085 for (
unsigned I = 0;
I < NumElements; ++
I)
5086 for (
unsigned J = 0; J < BytesPerElement; ++J)
5087 Bytes[
I * BytesPerElement + J] =
Index * BytesPerElement + J;
5098 unsigned BytesPerElement,
int &
Base) {
5100 for (
unsigned I = 0;
I < BytesPerElement; ++
I) {
5101 if (Bytes[Start +
I] >= 0) {
5102 unsigned Elem = Bytes[Start +
I];
5106 if (
unsigned(
Base) % Bytes.
size() + BytesPerElement > Bytes.
size())
5108 }
else if (
unsigned(
Base) != Elem -
I)
5121 unsigned &StartIndex,
unsigned &OpNo0,
5123 int OpNos[] = { -1, -1 };
5125 for (
unsigned I = 0;
I < 16; ++
I) {
5132 Shift = ExpectedShift;
5133 else if (Shift != ExpectedShift)
5137 if (OpNos[ModelOpNo] == 1 - RealOpNo)
5139 OpNos[ModelOpNo] = RealOpNo;
5176 N =
N->getOperand(0);
5178 if (
auto *
Op = dyn_cast<ConstantSDNode>(
N->getOperand(0)))
5179 return Op->getZExtValue() == 0;
5185 for (
unsigned I = 0;
I < Num ;
I++)
5197 for (
unsigned I = 0;
I < 2; ++
I)
5201 unsigned StartIndex, OpNo0, OpNo1;
5210 if (ZeroVecIdx != UINT32_MAX) {
5211 bool MaskFirst =
true;
5216 if (OpNo == ZeroVecIdx &&
I == 0) {
5221 if (OpNo != ZeroVecIdx && Byte == 0) {
5228 if (ZeroIdx != -1) {
5231 if (Bytes[
I] >= 0) {
5234 if (OpNo == ZeroVecIdx)
5244 SDValue Src = ZeroVecIdx == 0 ? Ops[1] : Ops[0];
5262 (!Ops[1].
isUndef() ? Ops[1] : Ops[0]), Op2);
5267struct GeneralShuffle {
5268 GeneralShuffle(
EVT vt) : VT(vt), UnpackFromEltSize(UINT_MAX) {}
5272 void tryPrepareForUnpack();
5273 bool unpackWasPrepared() {
return UnpackFromEltSize <= 4; }
5288 unsigned UnpackFromEltSize;
5293void GeneralShuffle::addUndef() {
5295 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
5296 Bytes.push_back(-1);
5305bool GeneralShuffle::add(
SDValue Op,
unsigned Elem) {
5311 EVT FromVT =
Op.getNode() ?
Op.getValueType() : VT;
5316 if (FromBytesPerElement < BytesPerElement)
5320 (FromBytesPerElement - BytesPerElement));
5323 while (
Op.getNode()) {
5325 Op =
Op.getOperand(0);
5341 }
else if (
Op.isUndef()) {
5350 for (; OpNo < Ops.size(); ++OpNo)
5351 if (Ops[OpNo] ==
Op)
5353 if (OpNo == Ops.size())
5358 for (
unsigned I = 0;
I < BytesPerElement; ++
I)
5359 Bytes.push_back(
Base +
I);
5368 if (Ops.size() == 0)
5372 tryPrepareForUnpack();
5375 if (Ops.size() == 1)
5376 Ops.push_back(DAG.
getUNDEF(MVT::v16i8));
5387 unsigned Stride = 1;
5388 for (; Stride * 2 < Ops.size(); Stride *= 2) {
5389 for (
unsigned I = 0;
I < Ops.size() - Stride;
I += Stride * 2) {
5390 SDValue SubOps[] = { Ops[
I], Ops[
I + Stride] };
5399 else if (OpNo ==
I + Stride)
5410 if (NewBytes[J] >= 0) {
5412 "Invalid double permute");
5415 assert(NewBytesMap[J] < 0 &&
"Invalid double permute");
5421 if (NewBytes[J] >= 0)
5429 Ops[1] = Ops[Stride];
5437 unsigned OpNo0, OpNo1;
5439 if (unpackWasPrepared() && Ops[1].
isUndef())
5441 else if (
const Permute *
P =
matchPermute(Bytes, OpNo0, OpNo1))
5446 Op = insertUnpackIfPrepared(DAG,
DL,
Op);
5453 dbgs() << Msg.c_str() <<
" { ";
5454 for (
unsigned i = 0; i < Bytes.
size(); i++)
5455 dbgs() << Bytes[i] <<
" ";
5463void GeneralShuffle::tryPrepareForUnpack() {
5465 if (ZeroVecOpNo == UINT32_MAX || Ops.size() == 1)
5470 if (Ops.size() > 2 &&
5475 UnpackFromEltSize = 1;
5476 for (; UnpackFromEltSize <= 4; UnpackFromEltSize *= 2) {
5477 bool MatchUnpack =
true;
5480 unsigned ToEltSize = UnpackFromEltSize * 2;
5481 bool IsZextByte = (Elt % ToEltSize) < UnpackFromEltSize;
5484 if (Bytes[Elt] != -1) {
5486 if (IsZextByte != (OpNo == ZeroVecOpNo)) {
5487 MatchUnpack =
false;
5493 if (Ops.size() == 2) {
5496 if (SrcBytes[i] != -1 && SrcBytes[i] % 16 !=
int(i)) {
5497 UnpackFromEltSize = UINT_MAX;
5504 if (UnpackFromEltSize > 4)
5507 LLVM_DEBUG(
dbgs() <<
"Preparing for final unpack of element size "
5508 << UnpackFromEltSize <<
". Zero vector is Op#" << ZeroVecOpNo
5510 dumpBytes(Bytes,
"Original Bytes vector:"););
5515 Elt += UnpackFromEltSize;
5516 for (
unsigned i = 0; i < UnpackFromEltSize; i++, Elt++,
B++)
5517 Bytes[
B] = Bytes[Elt];
5523 Ops.erase(&Ops[ZeroVecOpNo]);
5525 if (Bytes[
I] >= 0) {
5527 if (OpNo > ZeroVecOpNo)
5538 if (!unpackWasPrepared())
5540 unsigned InBits = UnpackFromEltSize * 8;
5544 unsigned OutBits = InBits * 2;
5553 if (!
Op.getOperand(
I).isUndef())
5569 if (
Value.isUndef())
5622 GeneralShuffle GS(VT);
5624 bool FoundOne =
false;
5625 for (
unsigned I = 0;
I < NumElements; ++
I) {
5628 Op =
Op.getOperand(0);
5631 unsigned Elem =
Op.getConstantOperandVal(1);
5632 if (!GS.add(
Op.getOperand(0), Elem))
5635 }
else if (
Op.isUndef()) {
5649 if (!ResidueOps.
empty()) {
5650 while (ResidueOps.
size() < NumElements)
5652 for (
auto &
Op : GS.Ops) {
5653 if (!
Op.getNode()) {
5659 return GS.getNode(DAG,
SDLoc(BVN));
5662bool SystemZTargetLowering::isVectorElementLoad(
SDValue Op)
const {
5663 if (
Op.getOpcode() ==
ISD::LOAD && cast<LoadSDNode>(
Op)->isUnindexed())
5676 unsigned int NumElements = Elems.
size();
5677 unsigned int Count = 0;
5678 for (
auto Elem : Elems) {
5679 if (!Elem.isUndef()) {
5682 else if (Elem != Single) {
5702 if (
Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
5706 bool AllLoads =
true;
5707 for (
auto Elem : Elems)
5708 if (!isVectorElementLoad(Elem)) {
5714 if (VT == MVT::v2i64 && !AllLoads)
5718 if (VT == MVT::v2f64 && !AllLoads)
5728 if (VT == MVT::v4f32 && !AllLoads) {
5742 DL, MVT::v2i64, Op01, Op23);
5750 unsigned NumConstants = 0;
5751 for (
unsigned I = 0;
I < NumElements; ++
I) {
5765 if (NumConstants > 0) {
5766 for (
unsigned I = 0;
I < NumElements; ++
I)
5777 std::map<const SDNode*, unsigned> UseCounts;
5778 SDNode *LoadMaxUses =
nullptr;
5779 for (
unsigned I = 0;
I < NumElements; ++
I)
5780 if (isVectorElementLoad(Elems[
I])) {
5781 SDNode *Ld = Elems[
I].getNode();
5783 if (LoadMaxUses ==
nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
5786 if (LoadMaxUses !=
nullptr) {
5787 ReplicatedVal =
SDValue(LoadMaxUses, 0);
5791 unsigned I1 = NumElements / 2 - 1;
5792 unsigned I2 = NumElements - 1;
5793 bool Def1 = !Elems[
I1].isUndef();
5794 bool Def2 = !Elems[I2].isUndef();
5808 for (
unsigned I = 0;
I < NumElements; ++
I)
5809 if (!
Done[
I] && !Elems[
I].
isUndef() && Elems[
I] != ReplicatedVal)
5817 auto *BVN = cast<BuildVectorSDNode>(
Op.getNode());
5819 EVT VT =
Op.getValueType();
5821 if (BVN->isConstant()) {
5840 for (
unsigned I = 0;
I < NumElements; ++
I)
5841 Ops[
I] =
Op.getOperand(
I);
5842 return buildVector(DAG,
DL, VT, Ops);
5847 auto *VSN = cast<ShuffleVectorSDNode>(
Op.getNode());
5849 EVT VT =
Op.getValueType();
5852 if (VSN->isSplat()) {
5854 unsigned Index = VSN->getSplatIndex();
5856 "Splat index should be defined and in first operand");
5866 GeneralShuffle
GS(VT);
5867 for (
unsigned I = 0;
I < NumElements; ++
I) {
5868 int Elt = VSN->getMaskElt(
I);
5871 else if (!
GS.add(
Op.getOperand(
unsigned(Elt) / NumElements),
5872 unsigned(Elt) % NumElements))
5875 return GS.getNode(DAG,
SDLoc(VSN));
5894 EVT VT =
Op.getValueType();
5899 if (VT == MVT::v2f64 &&
5919SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(
SDValue Op,
5925 EVT VT =
Op.getValueType();
5929 if (
auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
5944SDValue SystemZTargetLowering::
5947 EVT OutVT =
Op.getValueType();
5957 }
while (FromBits != ToBits);
5962SDValue SystemZTargetLowering::
5966 EVT OutVT =
Op.getValueType();
5970 unsigned NumInPerOut = InNumElts / OutNumElts;
5976 unsigned ZeroVecElt = InNumElts;
5977 for (
unsigned PackedElt = 0; PackedElt < OutNumElts; PackedElt++) {
5978 unsigned MaskElt = PackedElt * NumInPerOut;
5979 unsigned End = MaskElt + NumInPerOut - 1;
5980 for (; MaskElt <
End; MaskElt++)
5981 Mask[MaskElt] = ZeroVecElt++;
5982 Mask[MaskElt] = PackedElt;
5989 unsigned ByScalar)
const {
5994 EVT VT =
Op.getValueType();
5998 if (
auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
5999 APInt SplatBits, SplatUndef;
6000 unsigned SplatBitSize;
6004 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6005 ElemBitSize,
true) &&
6006 SplatBitSize == ElemBitSize) {
6009 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6018 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6024 if (
auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
6025 if (VSN->isSplat()) {
6027 unsigned Index = VSN->getSplatIndex();
6029 "Splat index should be defined and in first operand");
6036 return DAG.
getNode(ByScalar,
DL, VT, Op0, Shift);
6048 MVT ResultVT =
Op.getSimpleValueType();
6050 unsigned Check =
Op.getConstantOperandVal(1);
6052 unsigned TDCMask = 0;
6081 switch (
Op.getOpcode()) {
6083 return lowerFRAMEADDR(
Op, DAG);
6085 return lowerRETURNADDR(
Op, DAG);
6087 return lowerBR_CC(
Op, DAG);
6089 return lowerSELECT_CC(
Op, DAG);
6091 return lowerSETCC(
Op, DAG);
6093 return lowerSTRICT_FSETCC(
Op, DAG,
false);
6095 return lowerSTRICT_FSETCC(
Op, DAG,
true);
6097 return lowerGlobalAddress(cast<GlobalAddressSDNode>(
Op), DAG);
6099 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(
Op), DAG);
6101 return lowerBlockAddress(cast<BlockAddressSDNode>(
Op), DAG);
6103 return lowerJumpTable(cast<JumpTableSDNode>(
Op), DAG);
6105 return lowerConstantPool(cast<ConstantPoolSDNode>(
Op), DAG);
6107 return lowerBITCAST(
Op, DAG);
6109 return lowerVASTART(
Op, DAG);
6111 return lowerVACOPY(
Op, DAG);
6113 return lowerDYNAMIC_STACKALLOC(
Op, DAG);
6115 return lowerGET_DYNAMIC_AREA_OFFSET(
Op, DAG);
6117 return lowerSMUL_LOHI(
Op, DAG);
6119 return lowerUMUL_LOHI(
Op, DAG);
6121 return lowerSDIVREM(
Op, DAG);
6123 return lowerUDIVREM(
Op, DAG);
6128 return lowerXALUO(
Op, DAG);
6131 return lowerUADDSUBO_CARRY(
Op, DAG);
6133 return lowerOR(
Op, DAG);
6135 return lowerCTPOP(
Op, DAG);
6137 return lowerATOMIC_FENCE(
Op, DAG);
6141 return lowerATOMIC_STORE(
Op, DAG);
6143 return lowerATOMIC_LOAD(
Op, DAG);
6147 return lowerATOMIC_LOAD_SUB(
Op, DAG);
6165 return lowerATOMIC_CMP_SWAP(
Op, DAG);
6167 return lowerSTACKSAVE(
Op, DAG);
6169 return lowerSTACKRESTORE(
Op, DAG);
6171 return lowerPREFETCH(
Op, DAG);
6173 return lowerINTRINSIC_W_CHAIN(
Op, DAG);
6175 return lowerINTRINSIC_WO_CHAIN(
Op, DAG);
6177 return lowerBUILD_VECTOR(
Op, DAG);
6179 return lowerVECTOR_SHUFFLE(
Op, DAG);
6181 return lowerSCALAR_TO_VECTOR(
Op, DAG);
6183 return lowerINSERT_VECTOR_ELT(
Op, DAG);
6185 return lowerEXTRACT_VECTOR_ELT(
Op, DAG);
6187 return lowerSIGN_EXTEND_VECTOR_INREG(
Op, DAG);
6189 return lowerZERO_EXTEND_VECTOR_INREG(
Op, DAG);
6199 return lowerIS_FPCLASS(
Op, DAG);
6201 return lowerGET_ROUNDING(
Op, DAG);
6213 switch (
N->getOpcode()) {
6217 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1) };
6220 DL, Tys, Ops, MVT::i128, MMO);
6232 DL, Tys, Ops, MVT::i128, MMO);
6235 if (cast<AtomicSDNode>(
N)->getSuccessOrdering() ==
6238 MVT::Other, Res), 0);
6245 SDValue Ops[] = {
N->getOperand(0),
N->getOperand(1),
6250 DL, Tys, Ops, MVT::i128, MMO);
6261 if (
N->getValueType(0) == MVT::i128 && Src.getValueType() == MVT::f128 &&
6265 if (getRepRegClassFor(MVT::f128) == &SystemZ::VR128BitRegClass) {
6272 assert(getRepRegClassFor(MVT::f128) == &SystemZ::FP128BitRegClass &&
6273 "Unrecognized register class for f128.");
6298#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
6409 OPCODE(ATOMIC_LOADW_ADD);
6410 OPCODE(ATOMIC_LOADW_SUB);
6411 OPCODE(ATOMIC_LOADW_AND);
6413 OPCODE(ATOMIC_LOADW_XOR);
6414 OPCODE(ATOMIC_LOADW_NAND);
6415 OPCODE(ATOMIC_LOADW_MIN);
6416 OPCODE(ATOMIC_LOADW_MAX);
6417 OPCODE(ATOMIC_LOADW_UMIN);
6418 OPCODE(ATOMIC_LOADW_UMAX);
6419 OPCODE(ATOMIC_CMP_SWAPW);
6422 OPCODE(ATOMIC_STORE_128);
6423 OPCODE(ATOMIC_CMP_SWAP_128);
6437bool SystemZTargetLowering::canTreatAsByteVector(
EVT VT)
const {
6438 if (!Subtarget.hasVector())
6452 DAGCombinerInfo &DCI,
6460 unsigned Opcode =
Op.getOpcode();
6463 Op =
Op.getOperand(0);
6465 canTreatAsByteVector(
Op.getValueType())) {
6474 BytesPerElement,
First))
6481 if (Byte % BytesPerElement != 0)
6484 Index = Byte / BytesPerElement;
6488 canTreatAsByteVector(
Op.getValueType())) {
6491 EVT OpVT =
Op.getValueType();
6493 if (OpBytesPerElement < BytesPerElement)
6497 unsigned End = (
Index + 1) * BytesPerElement;
6498 if (
End % OpBytesPerElement != 0)
6501 Op =
Op.getOperand(
End / OpBytesPerElement - 1);
6502 if (!
Op.getValueType().isInteger()) {
6505 DCI.AddToWorklist(
Op.getNode());
6510 DCI.AddToWorklist(
Op.getNode());
6517 canTreatAsByteVector(
Op.getValueType()) &&
6518 canTreatAsByteVector(
Op.getOperand(0).getValueType())) {
6520 EVT ExtVT =
Op.getValueType();
6521 EVT OpVT =
Op.getOperand(0).getValueType();
6524 unsigned Byte =
Index * BytesPerElement;
6525 unsigned SubByte =
Byte % ExtBytesPerElement;
6526 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
6527 if (SubByte < MinSubByte ||
6528 SubByte + BytesPerElement > ExtBytesPerElement)
6531 Byte =
Byte / ExtBytesPerElement * OpBytesPerElement;
6533 Byte += SubByte - MinSubByte;
6534 if (Byte % BytesPerElement != 0)
6536 Op =
Op.getOperand(0);
6543 if (
Op.getValueType() != VecVT) {
6545 DCI.AddToWorklist(
Op.getNode());
6555SDValue SystemZTargetLowering::combineTruncateExtract(
6564 if (canTreatAsByteVector(VecVT)) {
6565 if (
auto *IndexN = dyn_cast<ConstantSDNode>(
Op.getOperand(1))) {
6568 if (BytesPerElement % TruncBytes == 0) {
6574 unsigned Scale = BytesPerElement / TruncBytes;
6575 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
6581 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
6582 return combineExtract(
DL, ResVT, VecVT, Vec, NewIndex, DCI,
true);
6590SDValue SystemZTargetLowering::combineZERO_EXTEND(
6591 SDNode *
N, DAGCombinerInfo &DCI)
const {
6595 EVT VT =
N->getValueType(0);
6597 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.
getOperand(0));
6598 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
6599 if (TrueOp && FalseOp) {
6609 DCI.CombineTo(N0.
getNode(), TruncSelect);
6617SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
6618 SDNode *
N, DAGCombinerInfo &DCI)
const {
6624 EVT VT =
N->getValueType(0);
6625 EVT EVT = cast<VTSDNode>(
N->getOperand(1))->getVT();
6638SDValue SystemZTargetLowering::combineSIGN_EXTEND(
6639 SDNode *
N, DAGCombinerInfo &DCI)
const {
6645 EVT VT =
N->getValueType(0);
6647 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.
getOperand(1));
6650 if (
auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.
getOperand(1))) {
6652 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
6653 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
6668SDValue SystemZTargetLowering::combineMERGE(
6669 SDNode *
N, DAGCombinerInfo &DCI)
const {
6671 unsigned Opcode =
N->getOpcode();
6679 if (Op1 ==
N->getOperand(0))
6684 if (ElemBytes <= 4) {
6692 DCI.AddToWorklist(Op1.
getNode());
6695 DCI.AddToWorklist(
Op.getNode());
6702SDValue SystemZTargetLowering::combineLOAD(
6703 SDNode *
N, DAGCombinerInfo &DCI)
const {
6705 EVT LdVT =
N->getValueType(0);
6710 if (LdVT == MVT::i128) {
6717 int UsedElements = 0;
6719 UI != UIEnd; ++UI) {
6721 if (UI.getUse().getResNo() != 0)
6734 User->getValueType(0) != MVT::i64)
6738 if (UsedElements & (1 <<
Index))
6741 UsedElements |= 1 <<
Index;
6747 for (
auto UserAndIndex :
Users) {
6749 unsigned Offset =
User->getValueType(0).getStoreSize() * UserAndIndex.second;
6754 LD->getPointerInfo().getWithOffset(
Offset),
6755 LD->getOriginalAlign(),
LD->getMemOperand()->getFlags(),
6758 DCI.CombineTo(
User, EltLoad,
true);
6766 DCI.AddToWorklist(Chain.
getNode());
6787 else if (UI.getUse().getResNo() == 0)
6790 if (!Replicate || OtherUses.
empty())
6796 for (
SDNode *U : OtherUses) {
6805bool SystemZTargetLowering::canLoadStoreByteSwapped(
EVT VT)
const {
6806 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
6808 if (Subtarget.hasVectorEnhancements2())
6809 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64 || VT == MVT::i128)
6821 for (
unsigned i = 0; i < NumElts; ++i) {
6822 if (M[i] < 0)
continue;
6823 if ((
unsigned) M[i] != NumElts - 1 - i)
6831 for (
auto *U : StoredVal->
uses()) {
6833 EVT CurrMemVT = ST->getMemoryVT().getScalarType();
6836 }
else if (isa<BuildVectorSDNode>(U)) {
6874SDValue SystemZTargetLowering::combineSTORE(
6875 SDNode *
N, DAGCombinerInfo &DCI)
const {
6877 auto *SN = cast<StoreSDNode>(
N);
6878 auto &Op1 =
N->getOperand(1);
6879 EVT MemVT = SN->getMemoryVT();
6884 if (MemVT.
isInteger() && SN->isTruncatingStore()) {
6886 combineTruncateExtract(
SDLoc(
N), MemVT, SN->getValue(), DCI)) {
6887 DCI.AddToWorklist(
Value.getNode());
6891 SN->getBasePtr(), SN->getMemoryVT(),
6892 SN->getMemOperand());
6896 if (!SN->isTruncatingStore() &&
6907 N->getOperand(0), BSwapOp,
N->getOperand(2)
6912 Ops, MemVT, SN->getMemOperand());
6915 if (!SN->isTruncatingStore() &&
6918 Subtarget.hasVectorEnhancements2()) {
6928 Ops, MemVT, SN->getMemOperand());
6938 DAG.
getStore(SN->getChain(),
DL, HiPart, SN->getBasePtr(),
6939 SN->getPointerInfo(), SN->getOriginalAlign(),
6940 SN->getMemOperand()->getFlags(), SN->getAAInfo());
6945 SN->getPointerInfo().getWithOffset(8),
6946 SN->getOriginalAlign(),
6947 SN->getMemOperand()->
getFlags(), SN->getAAInfo());
6967 if (
C->getAPIntValue().getBitWidth() > 64 ||
C->isAllOnes() ||
6971 if (VCI.isVectorConstantLegal(Subtarget) &&
6980 auto FindReplicatedReg = [&](
SDValue MulOp) {
6981 EVT MulVT = MulOp.getValueType();
6982 if (MulOp->getOpcode() ==
ISD::MUL &&
6983 (MulVT == MVT::i16 || MulVT == MVT::i32 || MulVT == MVT::i64)) {
6987 WordVT =
LHS->getOperand(0).getValueType();
6989 WordVT = cast<VTSDNode>(
LHS->getOperand(1))->getVT();
6993 if (
auto *
C = dyn_cast<ConstantSDNode>(MulOp->getOperand(1))) {
6995 APInt(MulVT.getSizeInBits(),
C->getZExtValue()));
6996 if (VCI.isVectorConstantLegal(Subtarget) &&
6998 WordVT == VCI.VecVT.getScalarType())
7004 if (isa<BuildVectorSDNode>(Op1) &&
7007 if (
auto *
C = dyn_cast<ConstantSDNode>(SplatVal))
7010 FindReplicatedReg(SplatVal);
7012 if (
auto *
C = dyn_cast<ConstantSDNode>(Op1))
7015 FindReplicatedReg(Op1);
7020 "Bad type handling");
7025 SN->getBasePtr(), SN->getMemOperand());
7032SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
7033 SDNode *
N, DAGCombinerInfo &DCI)
const {
7037 N->getOperand(0).hasOneUse() &&
7038 Subtarget.hasVectorEnhancements2()) {
7053 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
7057 DCI.CombineTo(
N, ESLoad);
7061 DCI.CombineTo(
Load.getNode(), ESLoad, ESLoad.
getValue(1));
7071SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
7072 SDNode *
N, DAGCombinerInfo &DCI)
const {
7075 if (!Subtarget.hasVector())
7081 Op.getValueType().isVector() &&
7082 Op.getOperand(0).getValueType().isVector() &&
7083 Op.getValueType().getVectorNumElements() ==
7084 Op.getOperand(0).getValueType().getVectorNumElements())
7085 Op =
Op.getOperand(0);
7089 EVT VecVT =
Op.getValueType();
7092 Op.getOperand(0),
N->getOperand(1));
7093 DCI.AddToWorklist(
Op.getNode());
7095 if (EltVT !=
N->getValueType(0)) {
7096 DCI.AddToWorklist(
Op.getNode());
7103 if (
auto *IndexN = dyn_cast<ConstantSDNode>(
N->getOperand(1))) {
7106 return combineExtract(
SDLoc(
N),
N->getValueType(0), VecVT, Op0,
7107 IndexN->getZExtValue(), DCI,
false);
7112SDValue SystemZTargetLowering::combineJOIN_DWORDS(
7113 SDNode *
N, DAGCombinerInfo &DCI)
const {
7116 if (
N->getOperand(0) ==
N->getOperand(1))
7127 if (Chain1 == Chain2)
7135SDValue SystemZTargetLowering::combineFP_ROUND(
7136 SDNode *
N, DAGCombinerInfo &DCI)
const {
7138 if (!Subtarget.hasVector())
7147 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7150 if (
N->getValueType(0) == MVT::f32 && Op0.
hasOneUse() &&
7156 for (
auto *U : Vec->
uses()) {
7157 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7159 U->getOperand(0) == Vec &&
7161 U->getConstantOperandVal(1) == 1) {
7163 if (OtherRound.
getOpcode() ==
N->getOpcode() &&
7167 if (
N->isStrictFPOpcode()) {
7172 {MVT::v4f32, MVT::Other}, {Chain, Vec});
7177 DCI.AddToWorklist(VRound.
getNode());
7181 DCI.AddToWorklist(Extract1.
getNode());
7190 N->getVTList(), Extract0, Chain);
7199SDValue SystemZTargetLowering::combineFP_EXTEND(
7200 SDNode *
N, DAGCombinerInfo &DCI)
const {
7202 if (!Subtarget.hasVector())
7211 unsigned OpNo =
N->isStrictFPOpcode() ? 1 : 0;
7214 if (
N->getValueType(0) == MVT::f64 && Op0.
hasOneUse() &&
7220 for (
auto *U : Vec->
uses()) {
7221 if (U != Op0.
getNode() &&
U->hasOneUse() &&
7223 U->getOperand(0) == Vec &&
7225 U->getConstantOperandVal(1) == 2) {
7227 if (OtherExtend.
getOpcode() ==
N->getOpcode() &&
7231 if (
N->isStrictFPOpcode()) {
7236 {MVT::v2f64, MVT::Other}, {Chain, Vec});
7241 DCI.AddToWorklist(VExtend.
getNode());
7245 DCI.AddToWorklist(Extract1.
getNode());
7254 N->getVTList(), Extract0, Chain);
7263SDValue SystemZTargetLowering::combineINT_TO_FP(
7264 SDNode *
N, DAGCombinerInfo &DCI)
const {
7269 unsigned Opcode =
N->getOpcode();
7270 EVT OutVT =
N->getValueType(0);
7274 unsigned InScalarBits =
Op->getValueType(0).getScalarSizeInBits();
7280 if (OutLLVMTy->
isVectorTy() && OutScalarBits > InScalarBits &&
7281 OutScalarBits <= 64) {
7282 unsigned NumElts = cast<FixedVectorType>(OutLLVMTy)->getNumElements();
7285 unsigned ExtOpcode =
7293SDValue SystemZTargetLowering::combineBSWAP(
7294 SDNode *
N, DAGCombinerInfo &DCI)
const {
7298 N->getOperand(0).hasOneUse() &&
7299 canLoadStoreByteSwapped(
N->getValueType(0))) {
7308 EVT LoadVT =
N->getValueType(0);
7309 if (LoadVT == MVT::i16)
7314 Ops,
LD->getMemoryVT(),
LD->getMemOperand());
7318 if (
N->getValueType(0) == MVT::i16)
7323 DCI.CombineTo(
N, ResVal);
7327 DCI.CombineTo(
Load.getNode(), ResVal, BSLoad.
getValue(1));
7336 Op.getValueType().isVector() &&
7337 Op.getOperand(0).getValueType().isVector() &&
7338 Op.getValueType().getVectorNumElements() ==
7339 Op.getOperand(0).getValueType().getVectorNumElements())
7340 Op =
Op.getOperand(0);
7352 (canLoadStoreByteSwapped(
N->getValueType(0)) &&
7354 EVT VecVT =
N->getValueType(0);
7355 EVT EltVT =
N->getValueType(0).getVectorElementType();
7358 DCI.AddToWorklist(Vec.
getNode());
7362 DCI.AddToWorklist(Elt.
getNode());
7365 DCI.AddToWorklist(Vec.
getNode());
7367 DCI.AddToWorklist(Elt.
getNode());
7375 if (SV &&
Op.hasOneUse()) {
7383 EVT VecVT =
N->getValueType(0);
7386 DCI.AddToWorklist(Op0.
getNode());
7390 DCI.AddToWorklist(Op1.
getNode());
7393 DCI.AddToWorklist(Op0.
getNode());
7395 DCI.AddToWorklist(Op1.
getNode());
7417 auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
7424 bool Invert =
false;
7431 auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
7434 auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
7437 if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
7439 else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
7443 auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
7444 auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
7445 if (!NewCCValid || !NewCCMask)
7447 CCValid = NewCCValid->getZExtValue();
7448 CCMask = NewCCMask->getZExtValue();
7458 if (CompareLHS->getOpcode() ==
ISD::SRA) {
7459 auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
7460 if (!SRACount || SRACount->getZExtValue() != 30)
7462 auto *SHL = CompareLHS->getOperand(0).getNode();
7465 auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
7468 auto *IPM = SHL->getOperand(0).getNode();
7473 if (!CompareLHS->hasOneUse())
7476 if (CompareRHS->getZExtValue() != 0)
7483 CCReg = IPM->getOperand(0);
7490SDValue SystemZTargetLowering::combineBR_CCMASK(
7491 SDNode *
N, DAGCombinerInfo &DCI)
const {
7495 auto *CCValid = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7496 auto *CCMask = dyn_cast<ConstantSDNode>(
N->getOperand(2));
7497 if (!CCValid || !CCMask)
7500 int CCValidVal = CCValid->getZExtValue();
7501 int CCMaskVal = CCMask->getZExtValue();
7510 N->getOperand(3), CCReg);
7514SDValue SystemZTargetLowering::combineSELECT_CCMASK(
7515 SDNode *
N, DAGCombinerInfo &DCI)
const {
7519 auto *CCValid = dyn_cast<ConstantSDNode>(
N->getOperand(2));
7520 auto *CCMask = dyn_cast<ConstantSDNode>(
N->getOperand(3));
7521 if (!CCValid || !CCMask)
7524 int CCValidVal = CCValid->getZExtValue();
7525 int CCMaskVal = CCMask->getZExtValue();
7530 N->getOperand(0),
N->getOperand(1),
7538SDValue SystemZTargetLowering::combineGET_CCMASK(
7539 SDNode *
N, DAGCombinerInfo &DCI)
const {
7542 auto *CCValid = dyn_cast<ConstantSDNode>(
N->getOperand(1));
7543 auto *CCMask = dyn_cast<ConstantSDNode>(
N->getOperand(2));
7544 if (!CCValid || !CCMask)
7546 int CCValidVal = CCValid->getZExtValue();
7547 int CCMaskVal = CCMask->getZExtValue();
7555 auto *SelectCCValid = dyn_cast<ConstantSDNode>(
Select->getOperand(2));
7556 auto *SelectCCMask = dyn_cast<ConstantSDNode>(
Select->getOperand(3));
7557 if (!SelectCCValid || !SelectCCMask)
7559 int SelectCCValidVal = SelectCCValid->getZExtValue();
7560 int SelectCCMaskVal = SelectCCMask->getZExtValue();
7562 auto *
TrueVal = dyn_cast<ConstantSDNode>(
Select->getOperand(0));
7563 auto *
FalseVal = dyn_cast<ConstantSDNode>(
Select->getOperand(1));
7564 if (!TrueVal || !FalseVal)
7568 else if (
TrueVal->getZExtValue() == 0 &&
FalseVal->getZExtValue() == 1)
7569 SelectCCMaskVal ^= SelectCCValidVal;
7573 if (SelectCCValidVal & ~CCValidVal)
7575 if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
7578 return Select->getOperand(4);
7581SDValue SystemZTargetLowering::combineIntDIVREM(
7582 SDNode *
N, DAGCombinerInfo &DCI)
const {
7584 EVT VT =
N->getValueType(0);
7598SDValue SystemZTargetLowering::combineINTRINSIC(
7599 SDNode *
N, DAGCombinerInfo &DCI)
const {
7602 unsigned Id =
N->getConstantOperandVal(1);
7606 case Intrinsic::s390_vll:
7607 case Intrinsic::s390_vlrl:
7608 if (
auto *
C = dyn_cast<ConstantSDNode>(
N->getOperand(2)))
7609 if (
C->getZExtValue() >= 15)
7614 case Intrinsic::s390_vstl:
7615 case Intrinsic::s390_vstrl:
7616 if (
auto *
C = dyn_cast<ConstantSDNode>(
N->getOperand(3)))
7617 if (
C->getZExtValue() >= 15)
7628 return N->getOperand(0);
7634 switch(
N->getOpcode()) {
7659 case ISD::UREM:
return combineIntDIVREM(
N, DCI);
7671 EVT VT =
Op.getValueType();
7674 unsigned Opcode =
Op.getOpcode();
7676 unsigned Id =
Op.getConstantOperandVal(0);
7678 case Intrinsic::s390_vpksh:
7679 case Intrinsic::s390_vpksf:
7680 case Intrinsic::s390_vpksg:
7681 case Intrinsic::s390_vpkshs:
7682 case Intrinsic::s390_vpksfs:
7683 case Intrinsic::s390_vpksgs:
7684 case Intrinsic::s390_vpklsh:
7685 case Intrinsic::s390_vpklsf:
7686 case Intrinsic::s390_vpklsg:
7687 case Intrinsic::s390_vpklshs:
7688 case Intrinsic::s390_vpklsfs:
7689 case Intrinsic::s390_vpklsgs:
7691 SrcDemE = DemandedElts;
7694 SrcDemE = SrcDemE.
trunc(NumElts / 2);
7697 case Intrinsic::s390_vuphb:
7698 case Intrinsic::s390_vuphh:
7699 case Intrinsic::s390_vuphf:
7700 case Intrinsic::s390_vuplhb:
7701 case Intrinsic::s390_vuplhh:
7702 case Intrinsic::s390_vuplhf:
7703 SrcDemE =
APInt(NumElts * 2, 0);
7706 case Intrinsic::s390_vuplb:
7707 case Intrinsic::s390_vuplhw:
7708 case Intrinsic::s390_vuplf:
7709 case Intrinsic::s390_vupllb:
7710 case Intrinsic::s390_vupllh:
7711 case Intrinsic::s390_vupllf:
7712 SrcDemE =
APInt(NumElts * 2, 0);
7715 case Intrinsic::s390_vpdi: {
7717 SrcDemE =
APInt(NumElts, 0);
7718 if (!DemandedElts[OpNo - 1])
7720 unsigned Mask =
Op.getConstantOperandVal(3);
7721 unsigned MaskBit = ((OpNo - 1) ? 1 : 4);
7723 SrcDemE.
setBit((Mask & MaskBit)? 1 : 0);
7726 case Intrinsic::s390_vsldb: {
7728 assert(VT == MVT::v16i8 &&
"Unexpected type.");
7729 unsigned FirstIdx =
Op.getConstantOperandVal(3);
7730 assert (FirstIdx > 0 && FirstIdx < 16 &&
"Unused operand.");
7731 unsigned NumSrc0Els = 16 - FirstIdx;
7732 SrcDemE =
APInt(NumElts, 0);
7734 APInt DemEls = DemandedElts.
trunc(NumSrc0Els);
7737 APInt DemEls = DemandedElts.
lshr(NumSrc0Els);
7742 case Intrinsic::s390_vperm:
7743 SrcDemE =
APInt(NumElts, 1);
7753 SrcDemE =
APInt(1, 1);
7756 SrcDemE = DemandedElts;
7767 const APInt &DemandedElts,
7782 const APInt &DemandedElts,
7784 unsigned Depth)
const {
7788 unsigned tmp0, tmp1;
7793 EVT VT =
Op.getValueType();
7794 if (
Op.getResNo() != 0 || VT == MVT::Untyped)
7797 "KnownBits does not match VT in bitwidth");
7800 "DemandedElts does not match VT number of elements");
7802 unsigned Opcode =
Op.getOpcode();
7804 bool IsLogical =
false;
7805 unsigned Id =
Op.getConstantOperandVal(0);
7807 case Intrinsic::s390_vpksh:
7808 case Intrinsic::s390_vpksf:
7809 case Intrinsic::s390_vpksg:
7810 case Intrinsic::s390_vpkshs:
7811 case Intrinsic::s390_vpksfs:
7812 case Intrinsic::s390_vpksgs:
7813 case Intrinsic::s390_vpklsh:
7814 case Intrinsic::s390_vpklsf:
7815 case Intrinsic::s390_vpklsg:
7816 case Intrinsic::s390_vpklshs:
7817 case Intrinsic::s390_vpklsfs:
7818 case Intrinsic::s390_vpklsgs:
7819 case Intrinsic::s390_vpdi:
7820 case Intrinsic::s390_vsldb:
7821 case Intrinsic::s390_vperm:
7824 case Intrinsic::s390_vuplhb:
7825 case Intrinsic::s390_vuplhh:
7826 case Intrinsic::s390_vuplhf:
7827 case Intrinsic::s390_vupllb:
7828 case Intrinsic::s390_vupllh:
7829 case Intrinsic::s390_vupllf:
7832 case Intrinsic::s390_vuphb:
7833 case Intrinsic::s390_vuphh:
7834 case Intrinsic::s390_vuphf:
7835 case Intrinsic::s390_vuplb:
7836 case Intrinsic::s390_vuplhw:
7837 case Intrinsic::s390_vuplf: {
7879 if (
LHS == 1)
return 1;
7882 if (
RHS == 1)
return 1;
7883 unsigned Common = std::min(
LHS,
RHS);
7884 unsigned SrcBitWidth =
Op.getOperand(OpNo).getScalarValueSizeInBits();
7885 EVT VT =
Op.getValueType();
7887 if (SrcBitWidth > VTBits) {
7888 unsigned SrcExtraBits = SrcBitWidth - VTBits;
7889 if (Common > SrcExtraBits)
7890 return (Common - SrcExtraBits);
7893 assert (SrcBitWidth == VTBits &&
"Expected operands of same bitwidth.");
7900 unsigned Depth)
const {
7901 if (
Op.getResNo() != 0)
7903 unsigned Opcode =
Op.getOpcode();
7905 unsigned Id =
Op.getConstantOperandVal(0);
7907 case Intrinsic::s390_vpksh:
7908 case Intrinsic::s390_vpksf:
7909 case Intrinsic::s390_vpksg:
7910 case Intrinsic::s390_vpkshs:
7911 case Intrinsic::s390_vpksfs:
7912 case Intrinsic::s390_vpksgs:
7913 case Intrinsic::s390_vpklsh:
7914 case Intrinsic::s390_vpklsf:
7915 case Intrinsic::s390_vpklsg:
7916 case Intrinsic::s390_vpklshs:
7917 case Intrinsic::s390_vpklsfs:
7918 case Intrinsic::s390_vpklsgs:
7919 case Intrinsic::s390_vpdi:
7920 case Intrinsic::s390_vsldb:
7921 case Intrinsic::s390_vperm:
7923 case Intrinsic::s390_vuphb:
7924 case Intrinsic::s390_vuphh:
7925 case Intrinsic::s390_vuphf:
7926 case Intrinsic::s390_vuplb:
7927 case Intrinsic::s390_vuplhw:
7928 case Intrinsic::s390_vuplf: {
7932 EVT VT =
Op.getValueType();
7956 switch (
Op->getOpcode()) {
7969 "Unexpected stack alignment");
7972 unsigned StackProbeSize =
7975 StackProbeSize &= ~(StackAlign - 1);
7976 return StackProbeSize ? StackProbeSize : StackAlign;
7993 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
7999 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8025 if (Succ->isLiveIn(SystemZ::CC))
8036 switch (
MI.getOpcode()) {
8037 case SystemZ::Select32:
8038 case SystemZ::Select64:
8039 case SystemZ::Select128:
8040 case SystemZ::SelectF32:
8041 case SystemZ::SelectF64:
8042 case SystemZ::SelectF128:
8043 case SystemZ::SelectVR32:
8044 case SystemZ::SelectVR64:
8045 case SystemZ::SelectVR128:
8077 for (
auto *
MI : Selects) {
8078 Register DestReg =
MI->getOperand(0).getReg();
8079 Register TrueReg =
MI->getOperand(1).getReg();
8080 Register FalseReg =
MI->getOperand(2).getReg();
8085 if (
MI->getOperand(4).getImm() == (CCValid ^ CCMask))
8088 if (RegRewriteTable.
contains(TrueReg))
8089 TrueReg = RegRewriteTable[TrueReg].first;
8091 if (RegRewriteTable.
contains(FalseReg))
8092 FalseReg = RegRewriteTable[FalseReg].second;
8095 BuildMI(*SinkMBB, SinkInsertionPoint,
DL,
TII->get(SystemZ::PHI), DestReg)
8100 RegRewriteTable[DestReg] = std::make_pair(TrueReg, FalseReg);
8113 unsigned CCValid =
MI.getOperand(3).getImm();
8114 unsigned CCMask =
MI.getOperand(4).getImm();
8126 assert(NextMI.getOperand(3).getImm() == CCValid &&
8127 "Bad CCValid operands since CC was not redefined.");
8128 if (NextMI.getOperand(4).getImm() == CCMask ||
8129 NextMI.getOperand(4).getImm() == (CCValid ^ CCMask)) {
8135 if (NextMI.definesRegister(SystemZ::CC) || NextMI.usesCustomInsertionHook())
8138 for (
auto *SelMI : Selects)
8139 if (NextMI.readsVirtualRegister(SelMI->getOperand(0).getReg())) {
8143 if (NextMI.isDebugInstr()) {
8145 assert(NextMI.isDebugValue() &&
"Unhandled debug opcode.");
8148 }
else if (
User || ++Count > 20)
8185 for (
auto *SelMI : Selects)
8186 SelMI->eraseFromParent();
8189 for (
auto *DbgMI : DbgValues)
8190 MBB->
splice(InsertPos, StartMBB, DbgMI);
8201 unsigned StoreOpcode,
8202 unsigned STOCOpcode,
8203 bool Invert)
const {
8208 int64_t Disp =
MI.getOperand(2).getImm();
8209 Register IndexReg =
MI.getOperand(3).getReg();
8210 unsigned CCValid =
MI.getOperand(4).getImm();
8211 unsigned CCMask =
MI.getOperand(5).getImm();
8214 StoreOpcode =
TII->getOpcodeForOffset(StoreOpcode, Disp);
8219 for (
auto *
I :
MI.memoperands())
8228 if (STOCOpcode && !IndexReg && Subtarget.hasLoadStoreOnCond()) {
8240 MI.eraseFromParent();
8280 MI.eraseFromParent();
8316 int HiOpcode =
Unsigned? SystemZ::VECLG : SystemZ::VECG;
8335 Register Temp =
MRI.createVirtualRegister(&SystemZ::VR128BitRegClass);
8343 MI.eraseFromParent();
8354 bool Invert)
const {
8363 int64_t Disp =
MI.getOperand(2).getImm();
8365 Register BitShift =
MI.getOperand(4).getReg();
8366 Register NegBitShift =
MI.getOperand(5).getReg();
8367 unsigned BitSize =
MI.getOperand(6).getImm();
8371 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
8372 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
8373 assert(LOpcode && CSOpcode &&
"Displacement out of range");
8376 Register OrigVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8377 Register OldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8378 Register NewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8379 Register RotatedOldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8380 Register RotatedNewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8411 Register Tmp =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8416 }
else if (BinOpcode)
8439 MI.eraseFromParent();
8450 unsigned KeepOldMask)
const {
8458 int64_t Disp =
MI.getOperand(2).getImm();
8460 Register BitShift =
MI.getOperand(4).getReg();
8461 Register NegBitShift =
MI.getOperand(5).getReg();
8462 unsigned BitSize =
MI.getOperand(6).getImm();
8466 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
8467 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
8468 assert(LOpcode && CSOpcode &&
"Displacement out of range");
8471 Register OrigVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8472 Register OldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8473 Register NewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8474 Register RotatedOldVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8475 Register RotatedAltVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8476 Register RotatedNewVal =
MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
8543 MI.eraseFromParent();
8559 int64_t Disp =
MI.getOperand(2).getImm();
8561 Register OrigSwapVal =
MI.getOperand(4).getReg();
8562 Register BitShift =
MI.getOperand(5).getReg();
8563 Register NegBitShift =
MI.getOperand(6).getReg();
8564 int64_t BitSize =
MI.getOperand(7).getImm();
8570 unsigned LOpcode =
TII->getOpcodeForOffset(SystemZ::L, Disp);
8571 unsigned CSOpcode =
TII->getOpcodeForOffset(SystemZ::CS, Disp);
8572 unsigned ZExtOpcode = BitSize == 8 ? SystemZ::LLCR : SystemZ::LLHR;
8573 assert(LOpcode && CSOpcode &&
"Displacement out of range");
8576 Register OrigOldVal =
MRI.createVirtualRegister(RC);
8579 Register StoreVal =
MRI.createVirtualRegister(RC);
8580 Register OldValRot =
MRI.createVirtualRegister(RC);
8581 Register RetryOldVal =
MRI.createVirtualRegister(RC);
8582 Register RetrySwapVal =
MRI.createVirtualRegister(RC);
8657 if (!
MI.registerDefIsDead(SystemZ::CC))
8660 MI.eraseFromParent();
8676 Register Tmp1 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8677 Register Tmp2 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8685 MI.eraseFromParent();
8694 bool ClearEven)
const {
8702 Register In128 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8706 Register NewIn128 =
MRI.createVirtualRegister(&SystemZ::GR128BitRegClass);
8707 Register Zero64 =
MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
8718 MI.eraseFromParent();
8725 unsigned Opcode,
bool IsMemset)
const {
8732 uint64_t DestDisp =
MI.getOperand(1).getImm();
8738 if (!isUInt<12>(Disp)) {
8739 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8740 unsigned Opcode =
TII->getOpcodeForOffset(SystemZ::LA, Disp);
8750 SrcDisp =
MI.getOperand(3).getImm();
8753 SrcDisp = DestDisp++;
8754 foldDisplIfNeeded(DestBase, DestDisp);
8758 bool IsImmForm = LengthMO.
isImm();
8759 bool IsRegForm = !IsImmForm;
8766 unsigned Length) ->
void {
8785 bool NeedsLoop =
false;
8787 Register LenAdjReg = SystemZ::NoRegister;
8789 ImmLength = LengthMO.
getImm();
8790 ImmLength += IsMemset ? 2 : 1;
8791 if (ImmLength == 0) {
8792 MI.eraseFromParent();
8795 if (Opcode == SystemZ::CLC) {
8796 if (ImmLength > 3 * 256)
8806 }
else if (ImmLength > 6 * 256)
8814 LenAdjReg = LengthMO.
getReg();
8820 (Opcode == SystemZ::CLC && (ImmLength > 256 || NeedsLoop)
8826 MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
8828 TII->loadImmediate(*
MBB,
MI, StartCountReg, ImmLength / 256);
8839 Register Reg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
8843 if (DestBase.
isReg() && DestBase.
getReg() == SystemZ::NoRegister)
8844 DestBase = loadZeroAddress();
8845 if (SrcBase.
isReg() && SrcBase.
getReg() == SystemZ::NoRegister)
8846 SrcBase = HaveSingleBase ? DestBase : loadZeroAddress();
8856 (HaveSingleBase ? StartSrcReg :
forceReg(
MI, DestBase,
TII));
8859 Register ThisSrcReg =
MRI.createVirtualRegister(RC);
8861 (HaveSingleBase ? ThisSrcReg :
MRI.createVirtualRegister(RC));
8862 Register NextSrcReg =
MRI.createVirtualRegister(RC);
8864 (HaveSingleBase ? NextSrcReg :
MRI.createVirtualRegister(RC));
8865 RC = &SystemZ::GR64BitRegClass;
8866 Register ThisCountReg =
MRI.createVirtualRegister(RC);
8867 Register NextCountReg =
MRI.createVirtualRegister(RC);
8893 MBB = MemsetOneCheckMBB;
8936 if (EndMBB && !ImmLength)
8958 if (!HaveSingleBase)
8965 if (Opcode == SystemZ::MVC)
8992 if (!HaveSingleBase)
9014 Register RemSrcReg =
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9015 Register RemDestReg = HaveSingleBase ? RemSrcReg
9016 :
MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9020 if (!HaveSingleBase)
9036 if (Opcode != SystemZ::MVC) {
9046 while (ImmLength > 0) {
9050 foldDisplIfNeeded(DestBase, DestDisp);
9051 foldDisplIfNeeded(SrcBase, SrcDisp);
9052 insertMemMemOp(
MBB,
MI, DestBase, DestDisp, SrcBase, SrcDisp, ThisLength);
9053 DestDisp += ThisLength;
9054 SrcDisp += ThisLength;
9055 ImmLength -= ThisLength;
9058 if (EndMBB && ImmLength > 0) {
9074 MI.eraseFromParent();
9087 uint64_t End1Reg =
MI.getOperand(0).getReg();
9088 uint64_t Start1Reg =
MI.getOperand(1).getReg();
9089 uint64_t Start2Reg =
MI.getOperand(2).getReg();
9090 uint64_t CharReg =
MI.getOperand(3).getReg();
9093 uint64_t This1Reg =
MRI.createVirtualRegister(RC);
9094 uint64_t This2Reg =
MRI.createVirtualRegister(RC);
9133 MI.eraseFromParent();
9140 bool NoFloat)
const {
9146 MI.setDesc(
TII->get(Opcode));
9150 uint64_t Control =
MI.getOperand(2).getImm();
9151 static const unsigned GPRControlBit[16] = {
9152 0x8000, 0x8000, 0x4000, 0x4000, 0x2000, 0x2000, 0x1000, 0x1000,
9153 0x0800, 0x0800, 0x0400, 0x0400, 0x0200, 0x0200, 0x0100, 0x0100
9155 Control |= GPRControlBit[15];
9157 Control |= GPRControlBit[11];
9158 MI.getOperand(2).setImm(Control);
9161 for (
int I = 0;
I < 16;
I++) {
9162 if ((Control & GPRControlBit[
I]) == 0) {
9169 if (!NoFloat && (Control & 4) != 0) {
9170 if (Subtarget.hasVector()) {
9202 MI.eraseFromParent();
9215 Register SizeReg =
MI.getOperand(2).getReg();
9227 Register PHIReg =
MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9228 Register IncReg =
MRI->createVirtualRegister(&SystemZ::ADDR64BitRegClass);
9293 MI.eraseFromParent();
9297SDValue SystemZTargetLowering::
9308 switch (
MI.getOpcode()) {
9309 case SystemZ::Select32:
9310 case SystemZ::Select64:
9311 case SystemZ::Select128:
9312 case SystemZ::SelectF32:
9313 case SystemZ::SelectF64:
9314 case SystemZ::SelectF128:
9315 case SystemZ::SelectVR32:
9316 case SystemZ::SelectVR64:
9317 case SystemZ::SelectVR128:
9318 return emitSelect(
MI,
MBB);
9320 case SystemZ::CondStore8Mux:
9321 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
false);
9322 case SystemZ::CondStore8MuxInv:
9323 return emitCondStore(
MI,
MBB, SystemZ::STCMux, 0,
true);
9324 case SystemZ::CondStore16Mux:
9325 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
false);
9326 case SystemZ::CondStore16MuxInv:
9327 return emitCondStore(
MI,
MBB, SystemZ::STHMux, 0,
true);
9328 case SystemZ::CondStore32Mux:
9329 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
false);
9330 case SystemZ::CondStore32MuxInv:
9331 return emitCondStore(
MI,
MBB, SystemZ::STMux, SystemZ::STOCMux,
true);
9332 case SystemZ::CondStore8:
9333 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
false);
9334 case SystemZ::CondStore8Inv:
9335 return emitCondStore(
MI,
MBB, SystemZ::STC, 0,
true);
9336 case SystemZ::CondStore16:
9337 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
false);
9338 case SystemZ::CondStore16Inv:
9339 return emitCondStore(
MI,
MBB, SystemZ::STH, 0,
true);
9340 case SystemZ::CondStore32:
9341 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
false);
9342 case SystemZ::CondStore32Inv:
9343 return emitCondStore(
MI,
MBB, SystemZ::ST, SystemZ::STOC,
true);
9344 case SystemZ::CondStore64:
9345 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
false);
9346 case SystemZ::CondStore64Inv:
9347 return emitCondStore(
MI,
MBB, SystemZ::STG, SystemZ::STOCG,
true);
9348 case SystemZ::CondStoreF32:
9349 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
false);
9350 case SystemZ::CondStoreF32Inv:
9351 return emitCondStore(
MI,
MBB, SystemZ::STE, 0,
true);
9352 case SystemZ::CondStoreF64:
9353 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
false);
9354 case SystemZ::CondStoreF64Inv:
9355 return emitCondStore(
MI,
MBB, SystemZ::STD, 0,
true);
9357 case SystemZ::SCmp128Hi:
9358 return emitICmp128Hi(
MI,
MBB,
false);
9359 case SystemZ::UCmp128Hi:
9360 return emitICmp128Hi(
MI,
MBB,
true);
9362 case SystemZ::PAIR128:
9363 return emitPair128(
MI,
MBB);
9364 case SystemZ::AEXT128:
9365 return emitExt128(
MI,
MBB,
false);
9366 case SystemZ::ZEXT128:
9367 return emitExt128(
MI,
MBB,
true);
9369 case SystemZ::ATOMIC_SWAPW:
9370 return emitAtomicLoadBinary(
MI,
MBB, 0);
9372 case SystemZ::ATOMIC_LOADW_AR:
9373 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AR);
9374 case SystemZ::ATOMIC_LOADW_AFI:
9375 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::AFI);
9377 case SystemZ::ATOMIC_LOADW_SR:
9378 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::SR);
9380 case SystemZ::ATOMIC_LOADW_NR:
9381 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR);
9382 case SystemZ::ATOMIC_LOADW_NILH:
9383 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH);
9385 case SystemZ::ATOMIC_LOADW_OR:
9386 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OR);
9387 case SystemZ::ATOMIC_LOADW_OILH:
9388 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::OILH);
9390 case SystemZ::ATOMIC_LOADW_XR:
9391 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XR);
9392 case SystemZ::ATOMIC_LOADW_XILF:
9393 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::XILF);
9395 case SystemZ::ATOMIC_LOADW_NRi:
9396 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NR,
true);
9397 case SystemZ::ATOMIC_LOADW_NILHi:
9398 return emitAtomicLoadBinary(
MI,
MBB, SystemZ::NILH,
true);
9400 case SystemZ::ATOMIC_LOADW_MIN:
9402 case SystemZ::ATOMIC_LOADW_MAX:
9404 case SystemZ::ATOMIC_LOADW_UMIN:
9406 case SystemZ::ATOMIC_LOADW_UMAX:
9409 case SystemZ::ATOMIC_CMP_SWAPW:
9410 return emitAtomicCmpSwapW(
MI,
MBB);
9411 case SystemZ::MVCImm:
9412 case SystemZ::MVCReg:
9413 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC);
9414 case SystemZ::NCImm:
9415 return emitMemMemWrapper(
MI,
MBB, SystemZ::NC);
9416 case SystemZ::OCImm:
9417 return emitMemMemWrapper(
MI,
MBB, SystemZ::OC);
9418 case SystemZ::XCImm:
9419 case SystemZ::XCReg:
9420 return emitMemMemWrapper(
MI,
MBB, SystemZ::XC);
9421 case SystemZ::CLCImm:
9422 case SystemZ::CLCReg:
9423 return emitMemMemWrapper(
MI,
MBB, SystemZ::CLC);
9424 case SystemZ::MemsetImmImm:
9425 case SystemZ::MemsetImmReg:
9426 case SystemZ::MemsetRegImm:
9427 case SystemZ::MemsetRegReg:
9428 return emitMemMemWrapper(
MI,
MBB, SystemZ::MVC,
true);
9429 case SystemZ::CLSTLoop:
9430 return emitStringWrapper(
MI,
MBB, SystemZ::CLST);
9431 case SystemZ::MVSTLoop:
9432 return emitStringWrapper(
MI,
MBB, SystemZ::MVST);
9433 case SystemZ::SRSTLoop:
9434 return emitStringWrapper(
MI,
MBB, SystemZ::SRST);
9435 case SystemZ::TBEGIN:
9436 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
false);
9437 case SystemZ::TBEGIN_nofloat:
9438 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGIN,
true);
9439 case SystemZ::TBEGINC:
9440 return emitTransactionBegin(
MI,
MBB, SystemZ::TBEGINC,
true);
9441 case SystemZ::LTEBRCompare_Pseudo:
9442 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTEBR);
9443 case SystemZ::LTDBRCompare_Pseudo:
9444 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTDBR);
9445 case SystemZ::LTXBRCompare_Pseudo:
9446 return emitLoadAndTestCmp0(
MI,
MBB, SystemZ::LTXBR);
9448 case SystemZ::PROBED_ALLOCA:
9449 return emitProbedAlloca(
MI,
MBB);
9451 case TargetOpcode::STACKMAP:
9452 case TargetOpcode::PATCHPOINT:
9463SystemZTargetLowering::getRepRegClassFor(
MVT VT)
const {
9464 if (VT == MVT::Untyped)
9465 return &SystemZ::ADDR128BitRegClass;
9491 DAG.
getMachineNode(SystemZ::EFPC, dl, {MVT::i32, MVT::Other}, Chain), 0);
unsigned const MachineRegisterInfo * MRI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
static bool isZeroVector(SDValue N)
Function Alias Analysis Results
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, const CCValAssign &VA, const SDLoc &DL)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
static bool isUndef(ArrayRef< int > Mask)
iv Induction Variable Users
unsigned const TargetRegisterInfo * TRI
const char LLVMTargetMachineRef TM
static bool isSelectPseudo(MachineInstr &MI)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForLTGFR(Comparison &C)
static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0, SDValue Op1)
static bool isOnlyUsedByStores(SDValue StoredVal, SelectionDAG &DAG)
static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT, unsigned Opcode, SDValue Op0, SDValue Op1, SDValue &Even, SDValue &Odd)
static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Value)
static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In)
static bool isSimpleShift(SDValue N, unsigned &ShiftVal)
static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1)
static uint32_t findZeroVectorIdx(SDValue *Ops, unsigned Num)
static bool isVectorElementSwap(ArrayRef< int > M, EVT VT)
static void getCSAddressAndShifts(SDValue Addr, SelectionDAG &DAG, SDLoc DL, SDValue &AlignedAddr, SDValue &BitShift, SDValue &NegBitShift)
static bool isShlDoublePermute(const SmallVectorImpl< int > &Bytes, unsigned &StartIndex, unsigned &OpNo0, unsigned &OpNo1)
static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL, const Permute &P, SDValue Op0, SDValue Op1)
static SDNode * emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg)
static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend, SDValue Op0, SDValue Op1, SDValue &Hi, SDValue &Lo)
static void createPHIsForSelects(SmallVector< MachineInstr *, 8 > &Selects, MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, MachineBasicBlock *SinkMBB)
static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL, SDValue *Ops, const SmallVectorImpl< int > &Bytes)
static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode, bool &Invert)
static unsigned CCMaskForCondCode(ISD::CondCode CC)
static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static void adjustForFNeg(Comparison &C)
static bool isScalarToVector(SDValue Op)
static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg, unsigned CCValid, unsigned CCMask)
static bool matchPermute(const SmallVectorImpl< int > &Bytes, const Permute &P, unsigned &OpNo0, unsigned &OpNo1)
static bool isAddCarryChain(SDValue Carry)
static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static MachineOperand earlyUseOperand(MachineOperand Op)
static bool canUseSiblingCall(const CCState &ArgCCInfo, SmallVectorImpl< CCValAssign > &ArgLocs, SmallVectorImpl< ISD::OutputArg > &Outs)
static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask)
static bool getzOSCalleeAndADA(SelectionDAG &DAG, SDValue &Callee, SDValue &ADA, SDLoc &DL, SDValue &Chain)
static bool shouldSwapCmpOperands(const Comparison &C)
static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType)
static SDValue getADAEntry(SelectionDAG &DAG, SDValue Val, SDLoc DL, unsigned Offset, bool LoadAdr=false)
static SDNode * emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op, unsigned Opcode)
static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static bool getVPermMask(SDValue ShuffleOp, SmallVectorImpl< int > &Bytes)
static const Permute PermuteForms[]
static bool isSubBorrowChain(SDValue Carry)
static void adjustICmp128(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
static APInt getDemandedSrcElements(SDValue Op, const APInt &DemandedElts, unsigned OpNo)
static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op, bool IsNegative)
static unsigned computeNumSignBitsBinOp(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static SDValue tryBuildVectorShuffle(SelectionDAG &DAG, BuildVectorSDNode *BVN)
static bool isMovedFromParts(SDValue Val, SDValue &LoPart, SDValue &HiPart)
static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode)
static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In)
static SDValue MergeInputChains(SDNode *N1, SDNode *N2)
static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask, uint64_t Mask, uint64_t CmpVal, unsigned ICmpType)
static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid)
static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL, SDValue Op, SDValue Chain)
static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1, ISD::CondCode Cond, const SDLoc &DL, SDValue Chain=SDValue(), bool IsSignaling=false)
static bool checkCCKill(MachineInstr &MI, MachineBasicBlock *MBB)
static Register forceReg(MachineInstr &MI, MachineOperand &Base, const SystemZInstrInfo *TII)
static bool is32Bit(EVT VT)
static std::pair< unsigned, const TargetRegisterClass * > parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC, const unsigned *Map, unsigned Size)
static bool matchDoublePermute(const SmallVectorImpl< int > &Bytes, const Permute &P, SmallVectorImpl< int > &Transform)
static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode, SDValue Call, unsigned CCValid, uint64_t CC, ISD::CondCode Cond)
static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg)
static AddressingMode getLoadStoreAddrMode(bool HasVector, Type *Ty)
static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT, SDValue Op0, SDValue Op1)
static void computeKnownBitsBinOp(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth, unsigned OpNo)
static bool getShuffleInput(const SmallVectorImpl< int > &Bytes, unsigned Start, unsigned BytesPerElement, int &Base)
static AddressingMode supportedAddressingMode(Instruction *I, bool HasVector)
static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C)
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
void setBitsFrom(unsigned loBit)
Set the top bits starting from loBit.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool isSingleWord() const
Determine if this APInt just has one word to store value.
void insertBits(const APInt &SubBits, unsigned bitPosition)
Insert the bits from a smaller APInt starting at bitPosition.
void lshrInPlace(unsigned ShiftAmt)
Logical right-shift this APInt by ShiftAmt in place.
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
an instruction that atomically reads a memory location, combines it with another value,...
BinOp getOperation() const
StringRef getValueAsString() const
Return the attribute's value as a string.
The address of a basic block.
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
CCState - This class holds information needed while lowering arguments and return values.
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
int64_t getLocMemOffset() const
This class represents a function call, abstracting a target machine's calling convention.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
const GlobalObject * getAliaseeObject() const
bool hasPrivateLinkage() const
bool hasInternalLinkage() const
A wrapper class for inspecting calls to intrinsic functions.
This is an important class for using LLVM in a threaded context.
This class is used to represent ISD::LOAD nodes.
static auto integer_fixedlen_vector_valuetypes()
bool isVector() const
Return true if this is a vector value type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setFrameAddressIsTaken(bool T)
void setReturnAddressIsTaken(bool s)
MachineFunctionProperties & reset(Property P)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
reverse_iterator rbegin()
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Wrapper class representing virtual and physical registers.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
void setFlags(SDNodeFlags NewFlags)
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offs=0, bool isT=false, unsigned TargetFlags=0)
SDNode * isConstantIntBuildVectorOrConstantInt(SDValue N) const
Test whether the given value is a constant int or similar node.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain, SDValue Ptr, SDValue Val, MachineMemOperand *MMO)
Gets a node for an atomic op, produces result (if relevant) and chain and takes 2 operands.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getMemBasePlusOffset(SDValue Base, TypeSize Offset, const SDLoc &DL, const SDNodeFlags Flags=SDNodeFlags())
Returns sum of the base pointer and offset.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
SDValue getSplatVector(EVT VT, const SDLoc &DL, SDValue Op)
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
bool SignBitIsZero(SDValue Op, unsigned Depth=0) const
Return true if the sign bit of Op is known to be zero.
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
ArrayRef< int > getMask() const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
constexpr size_t size() const
size - Get the string size.
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
A SystemZ-specific class detailing special use registers particular for calling conventions.
virtual int getStackPointerBias()=0
virtual int getCallFrameSize()=0
virtual int getStackPointerRegister()=0
A SystemZ-specific constant pool value.
static SystemZConstantPoolValue * Create(const GlobalValue *GV, SystemZCP::SystemZCPModifier Modifier)
unsigned getVarArgsFrameIndex() const
void setVarArgsFrameIndex(unsigned FI)
void setRegSaveFrameIndex(unsigned FI)
void incNumLocalDynamicTLSAccesses()
Register getVarArgsFirstGPR() const
void setADAVirtualRegister(Register Reg)
void setVarArgsFirstGPR(Register GPR)
Register getADAVirtualRegister() const
void setSizeOfFnParams(unsigned Size)
void setVarArgsFirstFPR(Register FPR)
unsigned getRegSaveFrameIndex() const
Register getVarArgsFirstFPR() const
const SystemZInstrInfo * getInstrInfo() const override
bool isPC32DBLSymbol(const GlobalValue *GV, CodeModel::Model CM) const
const TargetFrameLowering * getFrameLowering() const override
bool isTargetXPLINK64() const
SystemZCallingConventionRegisters * getSpecialRegisters() const
const SystemZRegisterInfo * getRegisterInfo() const override
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const override
Certain combinations of ABIs, Targets and features require that types are legal for some operations a...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, LLVMContext &Context) const override
This hook should be implemented to check whether the return values described by the Outs array can fi...
bool hasInlineStackProbe(const MachineFunction &MF) const override
Returns true if stack probing through inline assembly is requested.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *BB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &, EVT) const override
Return the ValueType of the result of SETCC operations.
bool allowTruncateForTailCall(Type *, Type *) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg, const SmallVectorImpl< ISD::OutputArg > &Outs, const SmallVectorImpl< SDValue > &OutVals, const SDLoc &DL, SelectionDAG &DAG) const override
This hook must be implemented to lower outgoing return values, described by the Outs array,...
bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const override
Determines the optimal series of memory ops to replace the memset / memcpy.
bool useSoftFloat() const override
std::pair< SDValue, SDValue > makeExternalCall(SDValue Chain, SelectionDAG &DAG, const char *CalleeName, EVT RetVT, ArrayRef< SDValue > Ops, CallingConv::ID CallConv, bool IsSigned, SDLoc DL, bool DoesNotReturn, bool IsReturnValueUsed) const
bool mayBeEmittedAsTailCall(const CallInst *CI) const override
Return true if the target may be able emit the call instruction as a tail call.
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
SystemZTargetLowering(const TargetMachine &TM, const SystemZSubtarget &STI)
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
bool isLegalICmpImmediate(int64_t Imm) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
TargetLowering::ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const override
Determine if the target supports unaligned memory accesses.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
TargetLowering::ConstraintType getConstraintType(StringRef Constraint) const override
Given a constraint, return the type of constraint it is for this target.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, std::optional< CallingConv::ID > CC) const override
Target-specific combining of register parts into its original value.
bool isTruncateFree(Type *, Type *) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine the number of bits in the operation that are sign bits.
void LowerOperationWrapper(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked by the type legalizer to legalize nodes with an illegal operand type but leg...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue LowerCall(CallLoweringInfo &CLI, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower calls into the specified DAG.
bool isLegalAddImmediate(int64_t Imm) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
This callback is invoked when a node result type is illegal for the target, and the operation was reg...
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
Lower the specified operand into the Ops vector.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
bool isGuaranteedNotToBeUndefOrPoisonForTargetNode(SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, bool PoisonOnly, unsigned Depth) const override
Return true if this function can prove that Op is never poison and, if PoisonOnly is false,...
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg, const SmallVectorImpl< ISD::InputArg > &Ins, const SDLoc &DL, SelectionDAG &DAG, SmallVectorImpl< SDValue > &InVals) const override
This hook must be implemented to lower the incoming (formal) arguments, described by the Ins array,...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
unsigned getStackProbeSize(const MachineFunction &MF) const
XPLINK64 calling convention specific use registers Particular to z/OS when in 64 bit mode.
int getCallFrameSize() final
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
virtual bool hasFP(const MachineFunction &MF) const =0
hasFP - Return true if the specified function should have a dedicated frame pointer register.
TargetInstrInfo - Interface to description of machine instruction set.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a targte-dependent sequence of memory operands that...
const TargetMachine & getTargetMachine() const
virtual unsigned getNumRegistersForCallingConv(LLVMContext &Context, CallingConv::ID CC, EVT VT) const
Certain targets require unusual breakdowns of certain types.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Register getStackPointerRegisterToSaveRestore() const
If a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save...
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
virtual const TargetRegisterClass * getRepRegClassFor(MVT VT) const
Return the 'representative' register class for the specified value type.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
virtual MVT getPointerMemTy(const DataLayout &DL, uint32_t AS=0) const
Return the in-memory pointer type for the given address space, defaults to the pointer type from the ...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
LegalizeAction getOperationAction(unsigned Op, EVT VT) const
Return how this operation should be treated: either it is legal, needs to be promoted to a larger siz...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual bool findOptimalMemOpLowering(std::vector< EVT > &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS, unsigned SrcAS, const AttributeList &FuncAttributes) const
Determines the optimal series of memory ops to replace the memset / memcpy.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
unsigned getPointerSize(unsigned AS) const
Get the pointer size for this target.
CodeModel::Model getCodeModel() const
Returns the code model.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetInstrInfo * getInstrInfo() const
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
constexpr ScalarTy getFixedValue() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ Fast
Attempts to make calls as fast as possible (e.g.
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ ATOMIC_CMP_SWAP_WITH_SUCCESS
Val, Success, OUTCHAIN = ATOMIC_CMP_SWAP_WITH_SUCCESS(INCHAIN, ptr, cmp, swap) N.b.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ MEMBARRIER
MEMBARRIER - Compiler barrier only; generate a no-op.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SIGN_EXTEND_VECTOR_INREG
SIGN_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register sign-extension of the low ...
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ SPLAT_VECTOR
SPLAT_VECTOR(VAL) - Returns a vector with the scalar value VAL duplicated in all lanes.
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ ANY_EXTEND_VECTOR_INREG
ANY_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register any-extension of the low la...
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ UADDO_CARRY
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
@ System
Synchronized with respect to all concurrently executing threads.
@ MO_ADA_DATA_SYMBOL_ADDR
@ MO_ADA_DIRECT_FUNC_DESC
@ MO_ADA_INDIRECT_FUNC_DESC
const unsigned GR64Regs[16]
const unsigned VR128Regs[32]
const unsigned GR128Regs[16]
const unsigned FP32Regs[16]
const unsigned GR32Regs[16]
const unsigned FP64Regs[16]
const int64_t ELFCallFrameSize
const unsigned VR64Regs[32]
const unsigned FP128Regs[16]
const unsigned VR32Regs[32]
unsigned odd128(bool Is32bit)
const unsigned CCMASK_CMP_GE
static bool isImmHH(uint64_t Val)
const unsigned CCMASK_TEND
const unsigned CCMASK_CS_EQ
const unsigned CCMASK_TBEGIN
const MCPhysReg ELFArgFPRs[ELFNumArgFPRs]
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_TM_SOME_1
const unsigned CCMASK_LOGICAL_CARRY
const unsigned TDCMASK_NORMAL_MINUS
const unsigned CCMASK_TDC
const unsigned CCMASK_FCMP
const unsigned CCMASK_TM_SOME_0
static bool isImmHL(uint64_t Val)
const unsigned TDCMASK_SUBNORMAL_MINUS
const unsigned TDCMASK_NORMAL_PLUS
const unsigned CCMASK_CMP_GT
const unsigned TDCMASK_QNAN_MINUS
const unsigned CCMASK_ANY
const unsigned CCMASK_ARITH
const unsigned CCMASK_TM_MIXED_MSB_0
const unsigned TDCMASK_SUBNORMAL_PLUS
static bool isImmLL(uint64_t Val)
const unsigned VectorBits
static bool isImmLH(uint64_t Val)
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
const unsigned TDCMASK_INFINITY_PLUS
unsigned reverseCCMask(unsigned CCMask)
const unsigned CCMASK_TM_ALL_0
const unsigned CCMASK_CMP_LE
const unsigned CCMASK_CMP_O
const unsigned CCMASK_CMP_EQ
const unsigned VectorBytes
const unsigned TDCMASK_INFINITY_MINUS
const unsigned CCMASK_ICMP
const unsigned CCMASK_VCMP_ALL
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_VCMP
const unsigned CCMASK_TM_MIXED_MSB_1
const unsigned CCMASK_TM_MSB_0
const unsigned CCMASK_ARITH_OVERFLOW
const unsigned CCMASK_CS_NE
const unsigned TDCMASK_SNAN_PLUS
const unsigned CCMASK_CMP_LT
const unsigned CCMASK_CMP_NE
const unsigned TDCMASK_ZERO_PLUS
const unsigned TDCMASK_QNAN_PLUS
const unsigned TDCMASK_ZERO_MINUS
unsigned even128(bool Is32bit)
const unsigned CCMASK_TM_ALL_1
const unsigned CCMASK_LOGICAL_BORROW
const unsigned ELFNumArgFPRs
const unsigned CCMASK_CMP_UO
const unsigned CCMASK_LOGICAL
const unsigned CCMASK_TM_MSB_1
const unsigned TDCMASK_SNAN_MINUS
Reg
All possible values of the reg field in the ModR/M byte.
support::ulittle32_t Word
NodeAddr< CodeNode * > Code
constexpr const char32_t SBase
This is an optimization pass for GlobalISel generic memory operations.
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
testing::Matcher< const detail::ErrorHolder & > Failed()
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
void dumpBytes(ArrayRef< uint8_t > Bytes, raw_ostream &OS)
Convert 'Bytes' to a hex string and output to 'OS'.
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
int countl_zero(T Val)
Count number of 0's from the most significant bit to the least stopping at the first 1.
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
@ Mul
Product of integers.
DWARFExpression::Operation Op
constexpr unsigned BitWidth
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AddressingMode(bool LongDispl, bool IdxReg)
This struct is a compact representation of a valid (non-zero power of two) alignment.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
bool isRound() const
Return true if the size is a power-of-two number of bytes.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
bool isInteger() const
Return true if this is an integer or a vector integer type.
KnownBits anyextOrTrunc(unsigned BitWidth) const
Return known bits for an "any" extension or truncation of the value we're tracking.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits intersectWith(const KnownBits &RHS) const
Returns KnownBits information that is known to be true for both this and RHS.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SystemZVectorConstantInfo(APInt IntImm)
SmallVector< unsigned, 2 > OpVals
bool isVectorConstantLegal(const SystemZSubtarget &Subtarget)
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
CallLoweringInfo & setNoReturn(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, AttributeSet ResultAttrs={})