50 #define DEBUG_TYPE "ppc-lowering"
64 STATISTIC(NumTailCalls,
"Number of tail calls");
65 STATISTIC(NumSiblingCalls,
"Number of sibling calls");
79 bool isPPC64 = Subtarget.
isPPC64();
116 if (isPPC64 || Subtarget.
hasFPCVT()) {
980 unsigned MaxMaxAlign) {
981 if (MaxAlign == MaxMaxAlign)
983 if (
VectorType *VTy = dyn_cast<VectorType>(Ty)) {
984 if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
986 else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
988 }
else if (
ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
989 unsigned EltAlign = 0;
991 if (EltAlign > MaxAlign)
993 }
else if (
StructType *STy = dyn_cast<StructType>(Ty)) {
994 for (
auto *EltTy : STy->elements()) {
995 unsigned EltAlign = 0;
997 if (EltAlign > MaxAlign)
999 if (MaxAlign == MaxMaxAlign)
1015 unsigned Align = Subtarget.
isPPC64() ? 8 : 4;
1148 return CFP->getValueAPF().isZero();
1152 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
CP->getConstVal()))
1153 return CFP->getValueAPF().isZero();
1161 return Op < 0 || Op == Val;
1173 if (ShuffleKind == 0) {
1176 for (
unsigned i = 0;
i != 16; ++
i)
1179 }
else if (ShuffleKind == 2) {
1182 for (
unsigned i = 0;
i != 16; ++
i)
1185 }
else if (ShuffleKind == 1) {
1186 unsigned j = IsLE ? 0 : 1;
1187 for (
unsigned i = 0;
i != 8; ++
i)
1204 if (ShuffleKind == 0) {
1207 for (
unsigned i = 0;
i != 16;
i += 2)
1211 }
else if (ShuffleKind == 2) {
1214 for (
unsigned i = 0;
i != 16;
i += 2)
1218 }
else if (ShuffleKind == 1) {
1219 unsigned j = IsLE ? 0 : 2;
1220 for (
unsigned i = 0;
i != 8;
i += 2)
1246 if (ShuffleKind == 0) {
1249 for (
unsigned i = 0;
i != 16;
i += 4)
1255 }
else if (ShuffleKind == 2) {
1258 for (
unsigned i = 0;
i != 16;
i += 4)
1264 }
else if (ShuffleKind == 1) {
1265 unsigned j = IsLE ? 0 : 4;
1266 for (
unsigned i = 0;
i != 8;
i += 4)
1283 unsigned LHSStart,
unsigned RHSStart) {
1286 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1287 "Unsupported merge size!");
1289 for (
unsigned i = 0;
i != 8/UnitSize; ++
i)
1290 for (
unsigned j = 0; j != UnitSize; ++j) {
1292 LHSStart+j+
i*UnitSize) ||
1294 RHSStart+j+
i*UnitSize))
1309 if (ShuffleKind == 1)
1310 return isVMerge(N, UnitSize, 0, 0);
1311 else if (ShuffleKind == 2)
1312 return isVMerge(N, UnitSize, 0, 16);
1316 if (ShuffleKind == 1)
1317 return isVMerge(N, UnitSize, 8, 8);
1318 else if (ShuffleKind == 0)
1319 return isVMerge(N, UnitSize, 8, 24);
1334 if (ShuffleKind == 1)
1335 return isVMerge(N, UnitSize, 8, 8);
1336 else if (ShuffleKind == 2)
1337 return isVMerge(N, UnitSize, 8, 24);
1341 if (ShuffleKind == 1)
1342 return isVMerge(N, UnitSize, 0, 0);
1343 else if (ShuffleKind == 0)
1344 return isVMerge(N, UnitSize, 0, 16);
1393 unsigned RHSStartValue) {
1397 for (
unsigned i = 0;
i < 2; ++
i)
1398 for (
unsigned j = 0; j < 4; ++j)
1400 i*RHSStartValue+j+IndexOffset) ||
1402 i*RHSStartValue+j+IndexOffset+8))
1424 unsigned indexOffset = CheckEven ? 4 : 0;
1425 if (ShuffleKind == 1)
1426 return isVMerge(N, indexOffset, 0);
1427 else if (ShuffleKind == 2)
1428 return isVMerge(N, indexOffset, 16);
1433 unsigned indexOffset = CheckEven ? 0 : 4;
1434 if (ShuffleKind == 1)
1435 return isVMerge(N, indexOffset, 0);
1436 else if (ShuffleKind == 0)
1437 return isVMerge(N, indexOffset, 16);
1459 for (i = 0; i != 16 && SVOp->
getMaskElt(i) < 0; ++
i)
1462 if (i == 16)
return -1;
1467 if (ShiftAmt < i)
return -1;
1472 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1474 for (++i; i != 16; ++
i)
1477 }
else if (ShuffleKind == 1) {
1479 for (++i; i != 16; ++
i)
1486 ShiftAmt = 16 - ShiftAmt;
1496 (EltSize == 1 || EltSize == 2 || EltSize == 4));
1508 if (ElementBase >= 16)
1513 for (
unsigned i = 1;
i != EltSize; ++
i)
1517 for (
unsigned i = EltSize, e = 16;
i != e;
i += EltSize) {
1519 for (
unsigned j = 0; j != EltSize; ++j)
1527 unsigned &InsertAtByte,
bool &Swap,
bool IsLE) {
1530 for (
unsigned i = 0;
i < 4; ++
i) {
1537 if (B1 != B0+1 || B2 != B1+1 || B3 != B2+1)
1546 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
1547 unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
1552 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
1553 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
1554 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
1555 InsertAtByte = IsLE ? 12 : 0;
1560 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
1561 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
1562 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
1563 InsertAtByte = IsLE ? 8 : 4;
1568 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
1569 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
1570 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
1571 InsertAtByte = IsLE ? 4 : 8;
1576 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
1577 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
1578 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
1579 InsertAtByte = IsLE ? 0 : 12;
1589 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
1590 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
1591 InsertAtByte = IsLE ? 12 : 0;
1594 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
1595 InsertAtByte = IsLE ? 8 : 4;
1598 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
1599 InsertAtByte = IsLE ? 4 : 8;
1602 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
1603 InsertAtByte = IsLE ? 0 : 12;
1618 return (16 / EltSize) - 1 - (SVOp->
getMaskElt(0) / EltSize);
1635 if (EltSize < ByteSize) {
1636 unsigned Multiple = ByteSize/EltSize;
1638 assert(Multiple > 1 && Multiple <= 4 &&
"How can this happen?");
1647 if (!UniquedVals[
i&(Multiple-1)].getNode())
1649 else if (UniquedVals[
i&(Multiple-1)] != N->
getOperand(
i))
1659 bool LeadingZero =
true;
1660 bool LeadingOnes =
true;
1661 for (
unsigned i = 0;
i != Multiple-1; ++
i) {
1662 if (!UniquedVals[
i].getNode())
continue;
1669 if (!UniquedVals[Multiple-1].getNode())
1671 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
1676 if (!UniquedVals[Multiple-1].getNode())
1678 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
1697 unsigned ValSizeInBytes = EltSize;
1700 Value = CN->getZExtValue();
1702 assert(CN->getValueType(0) ==
MVT::f32 &&
"Only one legal FP vector type!");
1703 Value =
FloatToBits(CN->getValueAPF().convertToFloat());
1709 if (ValSizeInBytes < ByteSize)
return SDValue();
1713 if (!
APInt(ValSizeInBytes * 8, Value).
isSplat(ByteSize * 8))
1720 if (MaskVal == 0)
return SDValue();
1723 if (SignExtend32<5>(MaskVal) == MaskVal)
1739 for (i = 0; i != 4 && SVOp->
getMaskElt(i) < 0; ++
i)
1742 if (i == 4)
return -1;
1747 if (ShiftAmt < i)
return -1;
1751 for (++i; i != 4; ++
i)
1767 if (!isa<ConstantSDNode>(N))
1770 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
1772 return Imm == (int32_t)cast<ConstantSDNode>(
N)->getZExtValue();
1774 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
1803 APInt LHSKnownZero, LHSKnownOne;
1804 APInt RHSKnownZero, RHSKnownOne;
1806 LHSKnownZero, LHSKnownOne);
1810 RHSKnownZero, RHSKnownOne);
1813 if (~(LHSKnownZero | RHSKnownZero) == 0) {
1871 bool Aligned)
const {
1881 (!Aligned || (imm & 3) == 0)) {
1893 &&
"Cannot handle constant offsets yet!");
1905 (!Aligned || (imm & 3) == 0)) {
1909 APInt LHSKnownZero, LHSKnownOne;
1912 if ((LHSKnownZero.
getZExtValue()|~(uint64_t)imm) == ~0ULL) {
1916 dyn_cast<FrameIndexSDNode>(N.
getOperand(0))) {
1935 CN->getValueType(0));
1940 if ((CN->getValueType(0) ==
MVT::i32 ||
1941 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
1942 (!Aligned || (CN->getZExtValue() & 3) == 0)) {
1943 int Addr = (int)CN->getZExtValue();
2006 Ptr =
LD->getBasePtr();
2007 VT =
LD->getMemoryVT();
2008 Alignment =
LD->getAlignment();
2010 Ptr =
ST->getBasePtr();
2011 VT =
ST->getMemoryVT();
2012 Alignment =
ST->getAlignment();
2036 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2039 SDValue Val = cast<StoreSDNode>(
N)->getValue();
2069 isa<ConstantSDNode>(
Offset))
2084 unsigned &HiOpFlags,
unsigned &LoOpFlags,
2101 if (GV->hasHiddenVisibility()) {
2163 unsigned MOHiFlag, MOLoFlag;
2241 unsigned MOHiFlag, MOLoFlag;
2270 unsigned MOHiFlag, MOLoFlag;
2292 bool is64bit = Subtarget.
isPPC64();
2318 PtrVT, GOTReg, TGA);
2322 PtrVT, TGA, GOTPtr);
2359 PtrVT, GOTPtr, TGA, TGA);
2361 PtrVT, TLSAddr, TGA);
2383 unsigned MOHiFlag, MOLoFlag;
2443 if (C->isAllOnesValue() || C->isNullValue())
2468 const Value *SV = cast<SrcValueSDNode>(Node->
getOperand(2))->getValue();
2509 InChain = OverflowArea.
getValue(1);
2555 InChain = DAG.
getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
2562 assert(!Subtarget.
isPPC64() &&
"LowerVACOPY is PPC32 only");
2586 bool isPPC64 = (PtrVT ==
MVT::i64);
2592 Entry.
Ty = IntPtrTy;
2593 Entry.
Node = Trmp; Args.push_back(Entry);
2598 Args.push_back(Entry);
2600 Entry.
Node = FPtr; Args.push_back(Entry);
2601 Entry.
Node = Nest; Args.push_back(Entry);
2605 CLI.setDebugLoc(dl).setChain(Chain)
2610 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
2611 return CallResult.second;
2661 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
2664 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
2667 uint64_t FPROffset = 1;
2676 uint64_t nextOffset = FPROffset;
2684 nextOffset += StackOffset;
2685 nextPtr = DAG.
getNode(
ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
2688 SDValue thirdStore = DAG.
getStore(secondStore, dl, StackOffsetFI, nextPtr,
2690 nextOffset += FrameOffset;
2691 nextPtr = DAG.
getNode(
ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
2694 return DAG.
getStore(thirdStore, dl, FR, nextPtr,
2698 #include "PPCGenCallingConv.inc"
2702 CCAssignFn *PPCTargetLowering::useFastISelCCs(
unsigned Flag)
const {
2703 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
2720 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2730 if (RegNum != NumArgRegs && RegNum % 2 == 1) {
2748 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2753 int RegsLeft = NumArgRegs - RegNum;
2757 if (RegNum != NumArgRegs && RegsLeft < 4) {
2758 for (
int i = 0;
i < RegsLeft;
i++) {
2772 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2782 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
2795 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
2796 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
2797 PPC::F11, PPC::F12, PPC::F13};
2801 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
2802 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
2807 unsigned PtrByteSize) {
2815 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2824 unsigned PtrByteSize) {
2825 unsigned Align = PtrByteSize;
2841 if (BVAlign > PtrByteSize) {
2842 if (BVAlign % PtrByteSize != 0)
2844 "ByVal alignment is not a multiple of the pointer size");
2870 unsigned PtrByteSize,
2871 unsigned LinkageSize,
2872 unsigned ParamAreaSize,
2873 unsigned &ArgOffset,
2874 unsigned &AvailableFPRs,
2875 unsigned &AvailableVRs,
bool HasQPX) {
2876 bool UseMemory =
false;
2881 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
2884 if (ArgOffset >= LinkageSize + ParamAreaSize)
2890 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2893 if (ArgOffset > LinkageSize + ParamAreaSize)
2904 if (AvailableFPRs > 0) {
2912 if (AvailableVRs > 0) {
2924 unsigned NumBytes) {
2926 unsigned AlignMask = TargetAlign - 1;
2927 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
2931 SDValue PPCTargetLowering::LowerFormalArguments(
2937 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
2940 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
2943 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
2948 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
2990 unsigned PtrByteSize = 4;
2999 CCInfo.AllocateStack(LinkageSize, PtrByteSize);
3001 CCInfo.PreAnalyzeFormalArguments(Ins);
3003 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3004 CCInfo.clearWasPPCF128();
3006 for (
unsigned i = 0, e = ArgLocs.
size();
i != e; ++
i) {
3019 RC = &PPC::GPRCRegClass;
3023 RC = &PPC::VSSRCRegClass;
3025 RC = &PPC::F4RCRegClass;
3029 RC = &PPC::VSFRCRegClass;
3031 RC = &PPC::F8RCRegClass;
3036 RC = &PPC::VRRCRegClass;
3039 RC = Subtarget.
hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
3043 RC = &PPC::VRRCRegClass;
3046 RC = &PPC::QFRCRegClass;
3049 RC = &PPC::QBRCRegClass;
3056 ValVT ==
MVT::i1 ? MVT::i32 : ValVT);
3085 CCByValInfo.
AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
3087 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3090 unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3091 MinReservedArea = std::max(MinReservedArea, LinkageSize);
3108 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3113 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3125 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3130 CCInfo.getNextStackOffset(),
true));
3138 for (
unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
3142 VReg = MF.
addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
3157 for (
unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
3161 VReg = MF.
addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
3174 if (!MemOps.
empty())
3185 const SDLoc &dl)
const {
3196 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
3209 "fastcc not supported on varargs functions");
3215 unsigned PtrByteSize = 8;
3219 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3220 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3224 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3230 const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3238 bool HasParameterArea = !isELFv2ABI || isVarArg;
3239 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3240 unsigned NumBytes = LinkageSize;
3241 unsigned AvailableFPRs = Num_FPR_Regs;
3242 unsigned AvailableVRs = Num_VR_Regs;
3243 for (
unsigned i = 0, e = Ins.
size();
i != e; ++
i) {
3248 PtrByteSize, LinkageSize, ParamAreaSize,
3249 NumBytes, AvailableFPRs, AvailableVRs,
3251 HasParameterArea =
true;
3258 unsigned ArgOffset = LinkageSize;
3259 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3260 unsigned &QFPR_idx = FPR_idx;
3263 unsigned CurArgIdx = 0;
3264 for (
unsigned ArgNo = 0, e = Ins.
size(); ArgNo != e; ++ArgNo) {
3266 bool needsLoad =
false;
3267 EVT ObjectVT = Ins[ArgNo].VT;
3268 EVT OrigVT = Ins[ArgNo].ArgVT;
3270 unsigned ArgSize = ObjSize;
3272 if (Ins[ArgNo].isOrigArg()) {
3273 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3274 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3279 unsigned CurArgOffset, Align;
3280 auto ComputeArgOffset = [&]() {
3283 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
3284 CurArgOffset = ArgOffset;
3291 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3292 GPR_idx =
std::min(GPR_idx, Num_GPR_Regs);
3298 assert(Ins[ArgNo].isOrigArg() &&
"Byval arguments cannot be implicit");
3305 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3327 if (HasParameterArea ||
3328 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3335 if (ObjSize < PtrByteSize) {
3339 if (!isLittleEndian) {
3345 if (GPR_idx != Num_GPR_Regs) {
3346 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3350 if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3367 ArgOffset += PtrByteSize;
3376 for (
unsigned j = 0; j <
ArgSize; j += PtrByteSize) {
3377 if (GPR_idx == Num_GPR_Regs)
3380 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3403 unsigned VReg = MF.
addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3406 if (ObjectVT == MVT::i32 || ObjectVT ==
MVT::i1)
3407 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3415 if (GPR_idx != Num_GPR_Regs) {
3416 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3419 if (ObjectVT == MVT::i32 || ObjectVT ==
MVT::i1)
3422 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3428 ArgSize = PtrByteSize;
3439 if (FPR_idx != Num_FPR_Regs) {
3445 ? &PPC::VSSRCRegClass
3446 : &PPC::F4RCRegClass);
3449 ? &PPC::VSFRCRegClass
3450 : &PPC::F8RCRegClass);
3461 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3465 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3486 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3496 if (!Subtarget.
hasQPX()) {
3500 if (VR_idx != Num_VR_Regs) {
3501 unsigned VReg = MF.
addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3516 "Invalid QPX parameter type");
3524 if (QFPR_idx != Num_QFPR_Regs) {
3527 case MVT::v4f64: RC = &PPC::QFRCRegClass;
break;
3528 case MVT::v4f32: RC = &PPC::QSRCRegClass;
break;
3529 default: RC = &PPC::QBRCRegClass;
break;
3548 if (ObjSize < ArgSize && !isLittleEndian)
3549 CurArgOffset += ArgSize - ObjSize;
3559 unsigned MinReservedArea;
3560 if (HasParameterArea)
3561 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
3563 MinReservedArea = LinkageSize;
3576 int Depth = ArgOffset;
3585 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3586 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
3587 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3598 if (!MemOps.
empty())
3604 SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
3619 unsigned PtrByteSize = isPPC64 ? 8 : 4;
3621 unsigned ArgOffset = LinkageSize;
3623 unsigned MinReservedArea = ArgOffset;
3627 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3630 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3631 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3635 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3642 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3644 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
3653 unsigned VecArgOffset = ArgOffset;
3654 if (!isVarArg && !isPPC64) {
3655 for (
unsigned ArgNo = 0, e = Ins.
size(); ArgNo != e;
3657 EVT ObjectVT = Ins[ArgNo].VT;
3664 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3693 VecArgOffset = ((VecArgOffset+15)/16)*16;
3694 VecArgOffset += 12*16;
3701 unsigned nAltivecParamsAtEnd = 0;
3703 unsigned CurArgIdx = 0;
3704 for (
unsigned ArgNo = 0, e = Ins.
size(); ArgNo != e; ++ArgNo) {
3706 bool needsLoad =
false;
3707 EVT ObjectVT = Ins[ArgNo].VT;
3709 unsigned ArgSize = ObjSize;
3711 if (Ins[ArgNo].isOrigArg()) {
3712 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3713 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3715 unsigned CurArgOffset = ArgOffset;
3720 if (isVarArg || isPPC64) {
3721 MinReservedArea = ((MinReservedArea+15)/16)*16;
3725 }
else nAltivecParamsAtEnd++;
3735 assert(Ins[ArgNo].isOrigArg() &&
"Byval arguments cannot be implicit");
3739 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3742 if (ObjSize==1 || ObjSize==2) {
3743 CurArgOffset = CurArgOffset + (4 - ObjSize);
3749 if (ObjSize==1 || ObjSize==2) {
3750 if (GPR_idx != Num_GPR_Regs) {
3753 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3755 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3765 ArgOffset += PtrByteSize;
3769 for (
unsigned j = 0; j <
ArgSize; j += PtrByteSize) {
3773 if (GPR_idx != Num_GPR_Regs) {
3776 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3778 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3786 ArgOffset += PtrByteSize;
3788 ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
3800 if (GPR_idx != Num_GPR_Regs) {
3801 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3810 ArgSize = PtrByteSize;
3813 ArgOffset += PtrByteSize;
3818 if (GPR_idx != Num_GPR_Regs) {
3819 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3822 if (ObjectVT == MVT::i32 || ObjectVT ==
MVT::i1)
3825 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3830 ArgSize = PtrByteSize;
3840 if (GPR_idx != Num_GPR_Regs) {
3842 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
3845 if (FPR_idx != Num_FPR_Regs) {
3860 ArgOffset += isPPC64 ? 8 : ObjSize;
3868 if (VR_idx != Num_VR_Regs) {
3869 unsigned VReg = MF.
addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3872 while ((ArgOffset % 16) != 0) {
3873 ArgOffset += PtrByteSize;
3874 if (GPR_idx != Num_GPR_Regs)
3878 GPR_idx =
std::min(GPR_idx+4, Num_GPR_Regs);
3882 if (!isVarArg && !isPPC64) {
3884 CurArgOffset = VecArgOffset;
3888 ArgOffset = ((ArgOffset+15)/16)*16;
3889 CurArgOffset = ArgOffset;
3901 CurArgOffset + (ArgSize - ObjSize),
3911 if (nAltivecParamsAtEnd) {
3912 MinReservedArea = ((MinReservedArea+15)/16)*16;
3913 MinReservedArea += 16*nAltivecParamsAtEnd;
3917 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
3930 int Depth = ArgOffset;
3940 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
3944 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3946 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3958 if (!MemOps.
empty())
3967 unsigned ParamSize) {
3969 if (!isTailCall)
return 0;
3973 int SPDiff = (int)CallerMinReservedArea - (
int)ParamSize;
3975 if (SPDiff < FI->getTailCallSPDelta())
4001 if (
const auto *
F = dyn_cast<Function>(GV)) {
4034 const unsigned PtrByteSize = 8;
4038 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4039 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4043 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4047 const unsigned NumFPRs = 13;
4049 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4051 unsigned NumBytes = LinkageSize;
4052 unsigned AvailableFPRs = NumFPRs;
4053 unsigned AvailableVRs = NumVRs;
4056 if (Param.Flags.isNest())
continue;
4059 PtrByteSize, LinkageSize, ParamAreaSize,
4060 NumBytes, AvailableFPRs, AvailableVRs,
4076 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4077 const Value* CalleeArg = *CalleeArgIter;
4078 const Value* CallerArg = &(*CallerArgIter);
4079 if (CalleeArg == CallerArg)
4087 isa<UndefValue>(CalleeArg))
4097 PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4107 if (
DisableSCO && !TailCallOpt)
return false;
4110 if (isVarArg)
return false;
4117 if (CallerCC != CalleeCC)
return false;
4136 !isa<ExternalSymbolSDNode>(Callee))
4167 PPCTargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee,
4183 for (
unsigned i = 0;
i != Ins.
size();
i++) {
4185 if (Flags.
isByVal())
return false;
4195 return G->getGlobal()->hasHiddenVisibility()
4196 ||
G->getGlobal()->hasProtectedVisibility();
4206 if (!C)
return nullptr;
4209 if ((Addr & 3) != 0 ||
4210 SignExtend32<26>(Addr) != Addr)
4222 struct TailCallArgumentInfo {
4227 TailCallArgumentInfo() : FrameIdx(0) {}
4236 for (
unsigned i = 0, e = TailCallArgs.
size();
i != e; ++
i) {
4238 SDValue FIN = TailCallArgs[
i].FrameIdxOp;
4239 int FI = TailCallArgs[
i].FrameIdx;
4242 Chain, dl, Arg, FIN,
4251 int SPDiff,
const SDLoc &dl) {
4257 bool isPPC64 = Subtarget.
isPPC64();
4258 int SlotSize = isPPC64 ? 8 : 4;
4259 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4261 NewRetAddrLoc,
true);
4264 Chain = DAG.
getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4270 int NewFPLoc = SPDiff + FL->getFramePointerSaveOffset();
4274 Chain = DAG.
getStore(Chain, dl, OldFP, NewFramePtrIdx,
4286 SDValue Arg,
int SPDiff,
unsigned ArgOffset,
4288 int Offset = ArgOffset + SPDiff;
4293 TailCallArgumentInfo Info;
4295 Info.FrameIdxOp = FIN;
4303 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4309 LROpOut = getReturnAddrFrameIndex(DAG);
4316 FPOpOut = getFramePointerFrameIndex(DAG);
4343 SDValue PtrOff,
int SPDiff,
unsigned ArgOffset,
bool isPPC64,
4366 const SDLoc &dl,
int SPDiff,
unsigned NumBytes,
SDValue LROp,
4376 if (!MemOpChains2.
empty())
4396 return G->getGlobal()->getValueType()->isFunctionTy();
4404 SDValue CallSeqStart,
const SDLoc &dl,
int SPDiff,
bool isTailCall,
4405 bool isPatchPoint,
bool hasNest,
4410 bool isPPC64 = Subtarget.
isPPC64();
4420 bool needIndirectCall =
true;
4421 if (!isSVR4ABI || !isPPC64)
4425 needIndirectCall =
false;
4434 if (
auto *
G = dyn_cast<GlobalAddressSDNode>(Callee))
4435 GV =
G->getGlobal();
4437 bool UsePlt = !Local && Subtarget.
isTargetELF() && !isPPC64;
4443 unsigned OpFlags = 0;
4452 needIndirectCall =
false;
4456 unsigned char OpFlags = 0;
4463 needIndirectCall =
false;
4473 needIndirectCall =
false;
4476 if (needIndirectCall) {
4479 SDValue MTCTROps[] = {Chain, Callee, InFlag};
4481 if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
4553 MTCTROps[0] = Chain;
4554 MTCTROps[1] = LoadFuncPtr;
4555 MTCTROps[2] = InFlag;
4569 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
4587 for (
unsigned i = 0, e = RegsToPass.size();
i != e; ++
i)
4589 RegsToPass[i].second.getValueType()));
4593 if (isSVR4ABI && isPPC64 && !isPatchPoint) {
4601 SDValue PPCTargetLowering::LowerCallResult(
4612 for (
unsigned i = 0, e = RVLocs.
size();
i != e; ++
i) {
4645 SDValue PPCTargetLowering::FinishCall(
4653 std::vector<EVT> NodeTys;
4655 unsigned CallOpc =
PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
4656 SPDiff, isTailCall, isPatchPoint, hasNest,
4657 RegsToPass, Ops, NodeTys, CS, Subtarget);
4666 int BytesCalleePops =
4674 assert(Mask &&
"Missing call preserved mask for calling convention");
4683 cast<RegisterSDNode>(Callee)->
getReg() == PPC::CTR) ||
4686 isa<ConstantSDNode>(Callee)) &&
4687 "Expecting an global address, external symbol, absolute value or register");
4733 Chain = DAG.
getNode(CallOpc, dl, NodeTys, Ops);
4742 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
4743 Ins, dl, DAG, InVals);
4767 IsEligibleForTailCallOptimization_64SVR4(Callee, CallConv, CS,
4768 isVarArg, Outs, Ins, DAG);
4770 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
4777 assert(isa<GlobalAddressSDNode>(Callee) &&
4778 "Callee should be an llvm::Function object.");
4780 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
4781 const unsigned Width = 80 - strlen(
"TCO caller: ")
4782 - strlen(
", callee linkage: 0, 0");
4783 dbgs() <<
"TCO caller: "
4785 <<
", callee linkage: "
4793 "site marked musttail");
4798 if (Subtarget.
useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
4800 Callee = LowerGlobalAddress(Callee, DAG);
4804 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
4805 isTailCall, isPatchPoint, Outs, OutVals, Ins,
4806 dl, DAG, InVals, CS);
4808 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
4809 isTailCall, isPatchPoint, Outs, OutVals, Ins,
4810 dl, DAG, InVals, CS);
4813 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
4814 isTailCall, isPatchPoint, Outs, OutVals, Ins,
4815 dl, DAG, InVals, CS);
4818 SDValue PPCTargetLowering::LowerCall_32SVR4(
4820 bool isTailCall,
bool isPatchPoint,
4832 unsigned PtrByteSize = 4;
4857 CCInfo.PreAnalyzeCallOperands(Outs);
4863 unsigned NumArgs = Outs.
size();
4865 for (
unsigned i = 0;
i != NumArgs; ++
i) {
4866 MVT ArgVT = Outs[
i].VT;
4870 if (Outs[
i].IsFixed) {
4880 errs() <<
"Call operand #" <<
i <<
" has unhandled type "
4888 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
4890 CCInfo.clearWasPPCF128();
4897 CCByValInfo.
AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
4899 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
4904 unsigned NumBytes = CCByValInfo.getNextStackOffset();
4919 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
4930 bool seenFloatArg =
false;
4932 for (
unsigned i = 0, j = 0, e = ArgLocs.
size();
4944 assert((j < ByValArgLocs.
size()) &&
"Index out of bounds!");
4963 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4966 DAG.ReplaceAllUsesWith(CallSeqStart.
getNode(),
4967 NewCallSeqStart.getNode());
4968 Chain = CallSeqStart = NewCallSeqStart;
4989 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5003 if (!MemOpChains.
empty())
5009 for (
unsigned i = 0, e = RegsToPass.
size();
i != e; ++
i) {
5010 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[
i].first,
5011 RegsToPass[
i].second, InFlag);
5019 SDValue Ops[] = { Chain, InFlag };
5031 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
5033 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5034 NumBytes, Ins, InVals, CS);
5039 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5046 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
5049 DAG.ReplaceAllUsesWith(CallSeqStart.
getNode(),
5050 NewCallSeqStart.getNode());
5051 return NewCallSeqStart;
5054 SDValue PPCTargetLowering::LowerCall_64SVR4(
5056 bool isTailCall,
bool isPatchPoint,
5065 unsigned NumOps = Outs.
size();
5066 bool hasNest =
false;
5067 bool IsSibCall =
false;
5070 unsigned PtrByteSize = 8;
5087 "fastcc not supported on varargs functions");
5094 unsigned NumBytes = LinkageSize;
5095 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5096 unsigned &QFPR_idx = FPR_idx;
5099 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5100 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5104 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5108 const unsigned NumFPRs = 13;
5110 const unsigned NumQFPRs = NumFPRs;
5114 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5117 for (
unsigned i = 0;
i != NumOps; ++
i) {
5119 EVT ArgVT = Outs[
i].VT;
5120 EVT OrigVT = Outs[
i].ArgVT;
5134 if (++NumGPRsUsed <= NumGPRs)
5143 if (++NumVRsUsed <= NumVRs)
5149 if (Subtarget.
hasQPX()) {
5150 if (++NumFPRsUsed <= NumFPRs)
5153 if (++NumVRsUsed <= NumVRs)
5161 if (++NumFPRsUsed <= NumFPRs)
5170 NumBytes = ((NumBytes + Align - 1) / Align) * Align;
5174 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5177 unsigned NumBytesActuallyUsed = NumBytes;
5185 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5214 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5225 unsigned ArgOffset = LinkageSize;
5231 for (
unsigned i = 0;
i != NumOps; ++
i) {
5234 EVT ArgVT = Outs[
i].VT;
5235 EVT OrigVT = Outs[
i].ArgVT;
5244 auto ComputePtrOff = [&]() {
5248 ArgOffset = ((ArgOffset + Align - 1) / Align) * Align;
5259 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5260 GPR_idx =
std::min(GPR_idx, NumGPRs);
5290 if (Size==1 || Size==2 || Size==4) {
5292 if (GPR_idx != NumGPRs) {
5296 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5298 ArgOffset += PtrByteSize;
5303 if (GPR_idx == NumGPRs && Size < 8) {
5305 if (!isLittleEndian) {
5310 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5313 ArgOffset += PtrByteSize;
5330 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5335 if (Size < 8 && GPR_idx != NumGPRs) {
5345 if (!isLittleEndian) {
5349 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5357 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5360 ArgOffset += PtrByteSize;
5366 for (
unsigned j=0; j<Size; j+=PtrByteSize) {
5369 if (GPR_idx != NumGPRs) {
5373 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5374 ArgOffset += PtrByteSize;
5376 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5390 RegsToPass.
push_back(std::make_pair(PPC::X11, Arg));
5398 if (GPR_idx != NumGPRs) {
5399 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Arg));
5405 true, isTailCall,
false, MemOpChains,
5406 TailCallArguments, dl);
5408 ArgOffset += PtrByteSize;
5411 ArgOffset += PtrByteSize;
5424 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5425 bool NeededLoad =
false;
5428 if (FPR_idx != NumFPRs)
5429 RegsToPass.
push_back(std::make_pair(
FPR[FPR_idx++], Arg));
5432 if (!NeedGPROrStack)
5454 }
else if (ArgOffset % PtrByteSize != 0) {
5458 if (!isLittleEndian)
5466 if (!isLittleEndian)
5476 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5490 true, isTailCall,
false, MemOpChains,
5491 TailCallArguments, dl);
5502 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5513 if (!Subtarget.
hasQPX()) {
5528 if (VR_idx != NumVRs) {
5532 RegsToPass.
push_back(std::make_pair(VR[VR_idx++], Load));
5535 for (
unsigned i=0;
i<16;
i+=PtrByteSize) {
5536 if (GPR_idx == NumGPRs)
5543 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5549 if (VR_idx != NumVRs) {
5550 RegsToPass.
push_back(std::make_pair(VR[VR_idx++], Arg));
5556 true, isTailCall,
true, MemOpChains,
5557 TailCallArguments, dl);
5568 "Invalid QPX parameter type");
5580 if (QFPR_idx != NumQFPRs) {
5584 RegsToPass.
push_back(std::make_pair(
QFPR[QFPR_idx++], Load));
5586 ArgOffset += (IsF32 ? 16 : 32);
5587 for (
unsigned i = 0;
i < (IsF32 ? 16U : 32U);
i += PtrByteSize) {
5588 if (GPR_idx == NumGPRs)
5595 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5601 if (QFPR_idx != NumQFPRs) {
5602 RegsToPass.
push_back(std::make_pair(
QFPR[QFPR_idx++], Arg));
5608 true, isTailCall,
true, MemOpChains,
5609 TailCallArguments, dl);
5611 ArgOffset += (IsF32 ? 16 : 32);
5615 ArgOffset += (IsF32 ? 16 : 32);
5621 assert(NumBytesActuallyUsed == ArgOffset);
5622 (void)NumBytesActuallyUsed;
5624 if (!MemOpChains.
empty())
5630 if (!isTailCall && !isPatchPoint &&
5632 !isa<ExternalSymbolSDNode>(Callee)) {
5646 if (isELFv2ABI && !isPatchPoint)
5647 RegsToPass.
push_back(std::make_pair((
unsigned)PPC::X12, Callee));
5653 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++
i) {
5654 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
5655 RegsToPass[i].second, InFlag);
5659 if (isTailCall && !IsSibCall)
5663 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint, hasNest,
5664 DAG, RegsToPass, InFlag, Chain, CallSeqStart, Callee,
5665 SPDiff, NumBytes, Ins, InVals, CS);
5668 SDValue PPCTargetLowering::LowerCall_Darwin(
5670 bool isTailCall,
bool isPatchPoint,
5677 unsigned NumOps = Outs.
size();
5681 unsigned PtrByteSize = isPPC64 ? 8 : 4;
5698 unsigned NumBytes = LinkageSize;
5706 unsigned nAltivecParamsAtEnd = 0;
5707 for (
unsigned i = 0; i != NumOps; ++
i) {
5709 EVT ArgVT = Outs[
i].VT;
5714 if (!isVarArg && !isPPC64) {
5717 nAltivecParamsAtEnd++;
5721 NumBytes = ((NumBytes+15)/16)*16;
5727 if (nAltivecParamsAtEnd) {
5728 NumBytes = ((NumBytes+15)/16)*16;
5729 NumBytes += 16*nAltivecParamsAtEnd;
5737 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5762 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5777 unsigned ArgOffset = LinkageSize;
5778 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5782 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
5785 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5786 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5790 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5793 const unsigned NumFPRs = 13;
5796 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
5802 for (
unsigned i = 0; i != NumOps; ++
i) {
5828 if (Size==1 || Size==2) {
5830 if (GPR_idx != NumGPRs) {
5834 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5836 ArgOffset += PtrByteSize;
5841 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5844 ArgOffset += PtrByteSize;
5851 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5858 for (
unsigned j=0; j<Size; j+=PtrByteSize) {
5861 if (GPR_idx != NumGPRs) {
5865 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5866 ArgOffset += PtrByteSize;
5868 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5880 if (GPR_idx != NumGPRs) {
5884 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Arg));
5887 isPPC64, isTailCall,
false, MemOpChains,
5888 TailCallArguments, dl);
5890 ArgOffset += PtrByteSize;
5894 if (FPR_idx != NumFPRs) {
5895 RegsToPass.
push_back(std::make_pair(
FPR[FPR_idx++], Arg));
5903 if (GPR_idx != NumGPRs) {
5907 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5915 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5921 if (GPR_idx != NumGPRs)
5929 isPPC64, isTailCall,
false, MemOpChains,
5930 TailCallArguments, dl);
5946 while (ArgOffset % 16 !=0) {
5947 ArgOffset += PtrByteSize;
5948 if (GPR_idx != NumGPRs)
5958 if (VR_idx != NumVRs) {
5962 RegsToPass.
push_back(std::make_pair(VR[VR_idx++], Load));
5965 for (
unsigned i=0; i<16; i+=PtrByteSize) {
5966 if (GPR_idx == NumGPRs)
5973 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5980 if (VR_idx != NumVRs) {
5982 RegsToPass.
push_back(std::make_pair(VR[VR_idx++], Arg));
5983 }
else if (nAltivecParamsAtEnd==0) {
5986 isPPC64, isTailCall,
true, MemOpChains,
5987 TailCallArguments, dl);
5998 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
6001 ArgOffset = ((ArgOffset+15)/16)*16;
6003 for (
unsigned i = 0; i != NumOps; ++
i) {
6005 EVT ArgType = Outs[
i].VT;
6012 isPPC64, isTailCall,
true, MemOpChains,
6013 TailCallArguments, dl);
6020 if (!MemOpChains.
empty())
6028 !isa<ExternalSymbolSDNode>(Callee) &&
6030 RegsToPass.
push_back(std::make_pair((
unsigned)(isPPC64 ? PPC::X12 :
6031 PPC::R12), Callee));
6036 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++
i) {
6037 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
6038 RegsToPass[i].second, InFlag);
6046 return FinishCall(CallConv, dl, isTailCall, isVarArg, isPatchPoint,
6048 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
6049 NumBytes, Ins, InVals, CS);
6058 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
6059 return CCInfo.CheckReturn(Outs, RetCC_PPC);
6078 for (
unsigned i = 0; i != RVLocs.
size(); ++
i) {
6109 if (PPC::G8RCRegClass.
contains(*I))
6111 else if (PPC::F8RCRegClass.
contains(*I))
6113 else if (PPC::CRRCRegClass.
contains(*I))
6115 else if (PPC::VRRCRegClass.
contains(*I))
6126 RetOps.push_back(Flag);
6132 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(
SDValue Op,
6141 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6143 SDValue Ops[2] = {Chain, FPSIdx};
6157 bool isPPC64 = Subtarget.
isPPC64();
6158 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
6178 bool isPPC64 = Subtarget.
isPPC64();
6193 FI->setReturnAddrSaveIndex(RASI);
6199 PPCTargetLowering::getFramePointerFrameIndex(
SelectionDAG & DAG)
const {
6201 bool isPPC64 = Subtarget.
isPPC64();
6216 FI->setFramePointerSaveIndex(FPSI);
6234 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
6236 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
6245 bool isPPC64 = Subtarget.
isPPC64();
6269 return LowerVectorLoad(Op, DAG);
6272 "Custom lowering only for i1 loads");
6294 return LowerVectorStore(Op, DAG);
6297 "Custom lowering only for i1 stores");
6317 "Custom lowering only for i1 results");
6428 void PPCTargetLowering::LowerFP_TO_INTForReuse(
SDValue Op, ReuseLoadInfo &RLI,
6430 const SDLoc &dl)
const {
6433 if (Src.getValueType() ==
MVT::f32)
6448 "i64 FP_TO_UINT is supported only with FPCVT");
6459 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
6493 const SDLoc &dl)
const {
6497 if (Src.getValueType() ==
MVT::f32)
6513 "i64 FP_TO_UINT is supported only with FPCVT");
6524 const SDLoc &dl)
const {
6526 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
6529 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6532 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
6543 bool PPCTargetLowering::canReuseLoadAddress(
SDValue Op,
EVT MemVT,
6554 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6568 "Non-pre-inc AM on PPC?");
6588 void PPCTargetLowering::spliceIntoChain(
SDValue ResChain,
6594 SDLoc dl(NewResChain);
6599 "A new TF really is required here");
6608 bool PPCTargetLowering::directMoveIsProfitable(
const SDValue &Op)
const {
6624 if (UI.getUse().get().getResNo() != 0)
6640 const SDLoc &dl)
const {
6643 "Invalid floating point type as target of conversion");
6645 "Int to FP conversions with direct moves require FPCVT");
6683 Value = DAG.
getNode(
ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
6703 if (Subtarget.
hasDirectMove() && directMoveIsProfitable(Op) &&
6705 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
6708 "UINT_TO_FP is supported only with FPCVT");
6773 if (canReuseLoadAddress(SINT,
MVT::i64, RLI, DAG)) {
6774 Bits = DAG.
getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
6775 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
6776 spliceIntoChain(RLI.ResChain, Bits.
getValue(1), DAG);
6778 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG,
ISD::SEXTLOAD)) {
6781 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6782 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6786 spliceIntoChain(RLI.ResChain, Bits.
getValue(1), DAG);
6788 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG,
ISD::ZEXTLOAD)) {
6791 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6792 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6796 spliceIntoChain(RLI.ResChain, Bits.
getValue(1), DAG);
6813 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
6814 "Expected an i32 store");
6824 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6825 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6842 "Unhandled INT_TO_FP type in custom expander!");
6865 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
6866 "Expected an i32 store");
6877 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6878 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6884 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
6887 "i32->FP without LFIWAX supported only on PPC64");
6902 MVT::f64, dl, Store, FIdx,
7002 SDValue OutOps[] = { OutLo, OutHi };
7031 SDValue OutOps[] = { OutLo, OutHi };
7060 SDValue OutOps[] = { OutLo, OutHi };
7072 assert(Val >= -16 && Val <= 15 &&
"vsplti is out of range!");
7074 static const MVT VTys[] = {
7084 EVT CanonicalVT = VTys[SplatSize-1];
7116 DAG.
getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
7128 for (
unsigned i = 0; i != 16; ++
i)
7147 bool HasDirectMove) {
7154 bool IsSplat =
true;
7155 bool IsLoad =
false;
7182 return !(IsSplat && IsLoad);
7194 assert(BVN &&
"Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
7208 "BUILD_VECTOR for v4i1 does not have 4 operands");
7210 bool IsConst =
true;
7211 for (
unsigned i = 0; i < 4; ++
i) {
7213 if (!isa<ConstantSDNode>(BVN->
getOperand(i))) {
7226 for (
unsigned i = 0; i < 4; ++
i) {
7247 for (
unsigned i = 0; i < 4; ++
i) {
7255 if (StoreSize > 4) {
7270 if (!Stores.
empty())
7281 DAG.
getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
7288 DAG.
getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
7301 APInt APSplatBits, APSplatUndef;
7302 unsigned SplatBitSize;
7306 SplatBitSize > 32) {
7310 if (Subtarget.
hasVSX() &&
7318 unsigned SplatSize = SplatBitSize / 8;
7323 if (SplatBits == 0) {
7349 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
7351 if (SextVal >= -16 && SextVal <= 15)
7363 if (SextVal >= -32 && SextVal <= 31) {
7381 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
7395 static const signed char SplatCsts[] = {
7396 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
7397 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
7403 int i = SplatCsts[idx];
7407 unsigned TypeShiftAmt = i & (SplatBitSize-1);
7410 if (SextVal == (
int)((
unsigned)i << TypeShiftAmt)) {
7412 static const unsigned IIDs[] = {
7413 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
7414 Intrinsic::ppc_altivec_vslw
7421 if (SextVal == (
int)((
unsigned)i >> TypeShiftAmt)) {
7423 static const unsigned IIDs[] = {
7424 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
7425 Intrinsic::ppc_altivec_vsrw
7432 if (SextVal == (
int)((
unsigned)i >> TypeShiftAmt)) {
7434 static const unsigned IIDs[] = {
7435 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
7436 Intrinsic::ppc_altivec_vsraw
7443 if (SextVal == (
int)(((
unsigned)i << TypeShiftAmt) |
7444 ((
unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
7446 static const unsigned IIDs[] = {
7447 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
7448 Intrinsic::ppc_altivec_vrlw
7455 if (SextVal == (
int)(((
unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
7461 if (SextVal == (
int)(((
unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
7467 if (SextVal == (
int)(((
unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
7482 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7483 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7484 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
7499 if (OpNum == OP_COPY) {
7500 if (LHSID == (1*9+2)*9+3)
return LHS;
7501 assert(LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
7513 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
7514 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
7515 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
7516 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
7519 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
7520 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
7521 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
7522 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
7525 for (
unsigned i = 0; i != 16; ++
i)
7526 ShufIdxs[i] = (i&3)+0;
7529 for (
unsigned i = 0; i != 16; ++
i)
7530 ShufIdxs[i] = (i&3)+4;
7533 for (
unsigned i = 0; i != 16; ++
i)
7534 ShufIdxs[i] = (i&3)+8;
7537 for (
unsigned i = 0; i != 16; ++
i)
7538 ShufIdxs[i] = (i&3)+12;
7567 unsigned ShiftElts, InsertAtByte;
7588 if (Subtarget.
hasVSX()) {
7595 ((isLittleEndian && SplatIdx == 3) ||
7596 (!isLittleEndian && SplatIdx == 0))) {
7618 if (Subtarget.
hasQPX()) {
7619 if (VT.getVectorNumElements() != 4)
7625 if (AlignIdx != -1) {
7630 if (SplatIdx >= 4) {
7643 for (
unsigned i = 0; i < 4; ++
i) {
7645 unsigned mm = m >= 0 ? (
unsigned) m : i;
7646 idx |= mm << (3-
i)*3;
7681 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
7701 unsigned PFIndexes[4];
7702 bool isFourElementShuffle =
true;
7703 for (
unsigned i = 0; i != 4 && isFourElementShuffle; ++
i) {
7705 for (
unsigned j = 0; j != 4; ++j) {
7706 if (PermMask[i*4+j] < 0)
7709 unsigned ByteSource = PermMask[i*4+j];
7710 if ((ByteSource & 3) != j) {
7711 isFourElementShuffle =
false;
7716 EltNo = ByteSource/4;
7717 }
else if (EltNo != ByteSource/4) {
7718 isFourElementShuffle =
false;
7722 PFIndexes[
i] = EltNo;
7730 if (isFourElementShuffle && !isLittleEndian) {
7732 unsigned PFTableIndex =
7733 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7736 unsigned Cost = (PFEntry >> 30);
7768 for (
unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++
i) {
7769 unsigned SrcElt = PermMask[
i] < 0 ? 0 : PermMask[
i];
7771 for (
unsigned j = 0; j != BytesPerElement; ++j)
7794 unsigned IntrinsicID =
7795 cast<ConstantSDNode>(Intrin.
getOperand(0))->getZExtValue();
7798 switch (IntrinsicID) {
7799 default:
return false;
7801 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1;
break;
7802 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1;
break;
7803 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1;
break;
7804 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1;
break;
7805 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1;
break;
7806 case Intrinsic::ppc_altivec_vcmpequd_p:
7814 case Intrinsic::ppc_altivec_vcmpneb_p:
7815 case Intrinsic::ppc_altivec_vcmpneh_p:
7816 case Intrinsic::ppc_altivec_vcmpnew_p:
7817 case Intrinsic::ppc_altivec_vcmpnezb_p:
7818 case Intrinsic::ppc_altivec_vcmpnezh_p:
7819 case Intrinsic::ppc_altivec_vcmpnezw_p:
7821 switch(IntrinsicID) {
7823 case Intrinsic::ppc_altivec_vcmpneb_p: CompareOpc = 7;
break;
7824 case Intrinsic::ppc_altivec_vcmpneh_p: CompareOpc = 71;
break;
7825 case Intrinsic::ppc_altivec_vcmpnew_p: CompareOpc = 135;
break;
7826 case Intrinsic::ppc_altivec_vcmpnezb_p: CompareOpc = 263;
break;
7827 case Intrinsic::ppc_altivec_vcmpnezh_p: CompareOpc = 327;
break;
7828 case Intrinsic::ppc_altivec_vcmpnezw_p: CompareOpc = 391;
break;
7835 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1;
break;
7836 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1;
break;
7837 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1;
break;
7838 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1;
break;
7839 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1;
break;
7840 case Intrinsic::ppc_altivec_vcmpgtsd_p:
7848 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1;
break;
7849 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1;
break;
7850 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1;
break;
7851 case Intrinsic::ppc_altivec_vcmpgtud_p:
7860 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
7861 case Intrinsic::ppc_vsx_xvcmpgedp_p:
7862 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
7863 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
7864 case Intrinsic::ppc_vsx_xvcmpgesp_p:
7865 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
7866 if (Subtarget.
hasVSX()) {
7867 switch (IntrinsicID) {
7868 case Intrinsic::ppc_vsx_xvcmpeqdp_p: CompareOpc = 99;
break;
7869 case Intrinsic::ppc_vsx_xvcmpgedp_p: CompareOpc = 115;
break;
7870 case Intrinsic::ppc_vsx_xvcmpgtdp_p: CompareOpc = 107;
break;
7871 case Intrinsic::ppc_vsx_xvcmpeqsp_p: CompareOpc = 67;
break;
7872 case Intrinsic::ppc_vsx_xvcmpgesp_p: CompareOpc = 83;
break;
7873 case Intrinsic::ppc_vsx_xvcmpgtsp_p: CompareOpc = 75;
break;
7883 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0;
break;
7884 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0;
break;
7885 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0;
break;
7886 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0;
break;
7887 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0;
break;
7888 case Intrinsic::ppc_altivec_vcmpequd:
7896 case Intrinsic::ppc_altivec_vcmpneb:
7897 case Intrinsic::ppc_altivec_vcmpneh:
7898 case Intrinsic::ppc_altivec_vcmpnew:
7899 case Intrinsic::ppc_altivec_vcmpnezb:
7900 case Intrinsic::ppc_altivec_vcmpnezh:
7901 case Intrinsic::ppc_altivec_vcmpnezw:
7903 switch (IntrinsicID) {
7905 case Intrinsic::ppc_altivec_vcmpneb: CompareOpc = 7;
break;
7906 case Intrinsic::ppc_altivec_vcmpneh: CompareOpc = 71;
break;
7907 case Intrinsic::ppc_altivec_vcmpnew: CompareOpc = 135;
break;
7908 case Intrinsic::ppc_altivec_vcmpnezb: CompareOpc = 263;
break;
7909 case Intrinsic::ppc_altivec_vcmpnezh: CompareOpc = 327;
break;
7910 case Intrinsic::ppc_altivec_vcmpnezw: CompareOpc = 391;
break;
7916 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0;
break;
7917 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0;
break;
7918 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0;
break;
7919 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0;
break;
7920 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0;
break;
7921 case Intrinsic::ppc_altivec_vcmpgtsd:
7929 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0;
break;
7930 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0;
break;
7931 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0;
break;
7932 case Intrinsic::ppc_altivec_vcmpgtud:
7948 unsigned IntrinsicID =
7949 cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
7951 if (IntrinsicID == Intrinsic::thread_pointer) {
7953 bool is64bit = Subtarget.
isPPC64();
7954 return DAG.
getRegister(is64bit ? PPC::X13 : PPC::R2,
7992 switch (cast<ConstantSDNode>(Op.
getOperand(1))->getZExtValue()) {
7995 BitNo = 0; InvertBit =
false;
7998 BitNo = 0; InvertBit =
true;
8001 BitNo = 2; InvertBit =
false;
8004 BitNo = 2; InvertBit =
true;
8065 "Should only be called for ISD::INSERT_VECTOR_ELT");
8079 "Unknown extract_vector_elt type");
8095 Value = DAG.
getNode(
ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8099 DAG.
getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
8111 DAG.
getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
8119 unsigned Offset = 4*cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
8154 for (
unsigned Idx = 0; Idx < 4; ++Idx) {
8156 if (ScalarVT != ScalarMemVT)
8160 ScalarMemVT,
MinAlign(Alignment, Idx * Stride),
8163 Load = DAG.
getLoad(ScalarVT, dl, LoadChain, BasePtr,
8170 "Unknown addressing mode on vector load");
8176 LoadChains[Idx] = Load.
getValue(1);
8180 BasePtr.getValueType()));
8191 SDValue RetOps[] = { Value, TF };
8201 SDValue VectElmts[4], VectElmtChains[4];
8202 for (
unsigned i = 0; i < 4; ++
i) {
8204 Idx = DAG.
getNode(
ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
8210 VectElmtChains[
i] = VectElmts[
i].
getValue(1);
8216 SDValue RVals[] = { Value, LoadChain };
8243 for (
unsigned Idx = 0; Idx < 4; ++Idx) {
8248 if (ScalarVT != ScalarMemVT)
8252 ScalarMemVT,
MinAlign(Alignment, Idx * Stride),
8255 Store = DAG.
getStore(StoreChain, dl, Ex, BasePtr,
8262 "Unknown addressing mode on vector store");
8270 Stores[Idx] =
Store;
8295 Value = DAG.
getNode(
ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
8299 DAG.
getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
8310 DAG.
getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
8318 SDValue Loads[4], LoadChains[4];
8319 for (
unsigned i = 0; i < 4; ++
i) {
8320 unsigned Offset = 4*
i;
8324 Loads[
i] = DAG.
getLoad(MVT::i32, dl, StoreChain, Idx,
8332 for (
unsigned i = 0; i < 4; ++
i) {
8380 LHS, RHS, Zero, DAG, dl);
8400 for (
unsigned i = 0; i != 8; ++
i) {
8401 if (isLittleEndian) {
8403 Ops[i*2+1] = 2*i+16;
8406 Ops[i*2+1] = 2*i+1+16;
8432 return LowerVASTART(Op, DAG);
8435 return LowerVAARG(Op, DAG);
8438 return LowerVACOPY(Op, DAG);
8441 return LowerSTACKRESTORE(Op, DAG);
8444 return LowerDYNAMIC_STACKALLOC(Op, DAG);
8447 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
8450 return LowerEH_DWARF_CFA(Op, DAG);
8455 case ISD::LOAD:
return LowerLOAD(Op, DAG);
8479 case ISD::MUL:
return LowerMUL(Op, DAG);
8496 llvm_unreachable(
"Do not know how to custom type legalize this operation!");
8507 if (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue() !=
8508 Intrinsic::ppc_is_decremented_ctr_nonzero)
8512 "Unexpected result type for CTR decrement intrinsic");
8580 bool IsLoad)
const {
8590 bool IsLoad)
const {
8602 unsigned AtomicSize,
8605 unsigned CmpPred)
const {
8609 auto LoadMnemonic = PPC::LDARX;
8610 auto StoreMnemonic = PPC::STDCX;
8611 switch (AtomicSize) {
8615 LoadMnemonic = PPC::LBARX;
8616 StoreMnemonic = PPC::STBCX;
8620 LoadMnemonic = PPC::LHARX;
8621 StoreMnemonic = PPC::STHCX;
8625 LoadMnemonic = PPC::LWARX;
8626 StoreMnemonic = PPC::STWCX;
8629 LoadMnemonic = PPC::LDARX;
8630 StoreMnemonic = PPC::STDCX;
8657 unsigned TmpReg = (!BinOpcode) ? incr :
8659 : &PPC::GPRCRegClass);
8684 BuildMI(BB, dl, TII->
get(LoadMnemonic), dest)
8685 .addReg(ptrA).
addReg(ptrB);
8690 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
8692 BuildMI(BB, dl, TII->
get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
8693 ExtReg).addReg(dest);
8694 BuildMI(BB, dl, TII->
get(CmpOpcode), PPC::CR0)
8695 .addReg(incr).
addReg(ExtReg);
8697 BuildMI(BB, dl, TII->
get(CmpOpcode), PPC::CR0)
8698 .addReg(incr).
addReg(dest);
8725 unsigned CmpPred)
const {
8729 CmpOpcode, CmpPred);
8737 bool is64bit = Subtarget.
isPPC64();
8739 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
8765 : &PPC::GPRCRegClass;
8805 if (ptrA != ZeroReg) {
8807 BuildMI(BB, dl, TII->
get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
8808 .addReg(ptrA).
addReg(ptrB);
8812 BuildMI(BB, dl, TII->
get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
8814 if (!isLittleEndian)
8815 BuildMI(BB, dl, TII->
get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
8816 .addReg(Shift1Reg).
addImm(is8bit ? 24 : 16);
8818 BuildMI(BB, dl, TII->
get(PPC::RLDICR), PtrReg)
8821 BuildMI(BB, dl, TII->
get(PPC::RLWINM), PtrReg)
8823 BuildMI(BB, dl, TII->
get(PPC::SLW), Incr2Reg)
8824 .addReg(incr).
addReg(ShiftReg);
8826 BuildMI(BB, dl, TII->
get(PPC::LI), Mask2Reg).addImm(255);
8828 BuildMI(BB, dl, TII->
get(PPC::LI), Mask3Reg).addImm(0);
8829 BuildMI(BB, dl, TII->
get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).
addImm(65535);
8832 .addReg(Mask2Reg).
addReg(ShiftReg);
8835 BuildMI(BB, dl, TII->
get(PPC::LWARX), TmpDestReg)
8836 .addReg(ZeroReg).
addReg(PtrReg);
8839 .addReg(Incr2Reg).
addReg(TmpDestReg);
8840 BuildMI(BB, dl, TII->
get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
8841 .addReg(TmpDestReg).
addReg(MaskReg);
8843 .addReg(TmpReg).
addReg(MaskReg);
8848 BuildMI(BB, dl, TII->
get(is64bit ? PPC::AND8 : PPC::AND), SReg)
8849 .addReg(TmpDestReg).
addReg(MaskReg);
8850 unsigned ValueReg = SReg;
8851 unsigned CmpReg = Incr2Reg;
8852 if (CmpOpcode == PPC::CMPW) {
8854 BuildMI(BB, dl, TII->
get(PPC::SRW), ValueReg)
8855 .addReg(SReg).
addReg(ShiftReg);
8857 BuildMI(BB, dl, TII->
get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
8859 ValueReg = ValueSReg;
8862 BuildMI(BB, dl, TII->
get(CmpOpcode), PPC::CR0)
8863 .addReg(CmpReg).
addReg(ValueReg);
8871 .addReg(Tmp3Reg).
addReg(Tmp2Reg);
8882 BuildMI(*BB, BB->
begin(), dl, TII->
get(PPC::SRW), dest).addReg(TmpDestReg)
8911 "Invalid Pointer Size!");
8964 MIB =
BuildMI(*thisMBB, MI, DL, TII->
get(PPC::STD))
8975 BaseReg = Subtarget.
isPPC64() ? PPC::X1 : PPC::R1;
8977 BaseReg = Subtarget.
isPPC64() ? PPC::BP8 : PPC::BP;
8979 MIB =
BuildMI(*thisMBB, MI, DL,
8980 TII->
get(Subtarget.
isPPC64() ? PPC::STD : PPC::STW))
8987 MIB =
BuildMI(*thisMBB, MI, DL, TII->
get(PPC::BCLalways)).addMBB(mainMBB);
8991 BuildMI(*thisMBB, MI, DL, TII->
get(PPC::LI), restoreDstReg).addImm(1);
8993 MIB =
BuildMI(*thisMBB, MI, DL, TII->
get(PPC::EH_SjLj_Setup))
9004 TII->
get(Subtarget.
isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
9008 MIB =
BuildMI(mainMBB, DL, TII->
get(PPC::STD))
9013 MIB =
BuildMI(mainMBB, DL, TII->
get(PPC::STW))
9021 BuildMI(mainMBB, DL, TII->
get(PPC::LI), mainDstReg).addImm(0);
9026 TII->
get(PPC::PHI), DstReg)
9027 .addReg(mainDstReg).
addMBB(mainMBB)
9049 "Invalid Pointer Size!");
9052 (PVT ==
MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
9055 unsigned FP = (PVT ==
MVT::i64) ? PPC::X31 : PPC::R31;
9056 unsigned SP = (PVT ==
MVT::i64) ? PPC::X1 : PPC::R1;
9080 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), FP)
9089 .addImm(LabelOffset)
9092 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), Tmp)
9093 .addImm(LabelOffset)
9104 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), SP)
9116 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), BP)
9144 if (MI.
getOpcode() == TargetOpcode::STACKMAP ||
9145 MI.
getOpcode() == TargetOpcode::PATCHPOINT) {
9147 MI.
getOpcode() == TargetOpcode::PATCHPOINT) {
9160 if (MI.
getOpcode() == PPC::EH_SjLj_SetJmp32 ||
9161 MI.
getOpcode() == PPC::EH_SjLj_SetJmp64) {
9163 }
else if (MI.
getOpcode() == PPC::EH_SjLj_LongJmp32 ||
9164 MI.
getOpcode() == PPC::EH_SjLj_LongJmp64) {
9182 if (MI.
getOpcode() == PPC::SELECT_CC_I4 ||
9192 }
else if (MI.
getOpcode() == PPC::SELECT_CC_I4 ||
9196 MI.
getOpcode() == PPC::SELECT_CC_QFRC ||
9197 MI.
getOpcode() == PPC::SELECT_CC_QSRC ||
9198 MI.
getOpcode() == PPC::SELECT_CC_QBRC ||
9199 MI.
getOpcode() == PPC::SELECT_CC_VRRC ||
9200 MI.
getOpcode() == PPC::SELECT_CC_VSFRC ||
9201 MI.
getOpcode() == PPC::SELECT_CC_VSSRC ||
9202 MI.
getOpcode() == PPC::SELECT_CC_VSRC ||
9277 }
else if (MI.
getOpcode() == PPC::ReadTB) {
9309 BuildMI(BB, dl, TII->
get(PPC::MFSPR), HiReg).addImm(269);
9310 BuildMI(BB, dl, TII->
get(PPC::MFSPR), LoReg).addImm(268);
9311 BuildMI(BB, dl, TII->
get(PPC::MFSPR), ReadAgainReg).addImm(269);
9316 .addReg(HiReg).
addReg(ReadAgainReg);
9322 }
else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
9324 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
9326 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
9328 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
9331 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
9333 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
9335 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
9337 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
9340 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
9342 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
9344 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
9346 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
9349 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
9351 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
9353 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
9355 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
9358 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
9360 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
9362 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
9364 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
9367 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
9369 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
9371 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
9373 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
9376 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
9378 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
9380 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
9382 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
9385 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
9387 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
9389 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
9391 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
9394 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
9396 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
9398 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
9400 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
9403 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
9405 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
9407 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
9409 else if (MI.
getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
9412 else if (MI.
getOpcode() == PPC::ATOMIC_SWAP_I8)
9414 else if (MI.
getOpcode() == PPC::ATOMIC_SWAP_I16)
9416 else if (MI.
getOpcode() == PPC::ATOMIC_SWAP_I32)
9418 else if (MI.
getOpcode() == PPC::ATOMIC_SWAP_I64)
9421 else if (MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
9422 MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
9424 MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
9426 MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
9427 bool is64bit = MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
9429 auto LoadMnemonic = PPC::LDARX;
9430 auto StoreMnemonic = PPC::STDCX;
9434 case PPC::ATOMIC_CMP_SWAP_I8:
9435 LoadMnemonic = PPC::LBARX;
9436 StoreMnemonic = PPC::STBCX;
9439 case PPC::ATOMIC_CMP_SWAP_I16:
9440 LoadMnemonic = PPC::LHARX;
9441 StoreMnemonic = PPC::STHCX;
9444 case PPC::ATOMIC_CMP_SWAP_I32:
9445 LoadMnemonic = PPC::LWARX;
9446 StoreMnemonic = PPC::STWCX;
9448 case PPC::ATOMIC_CMP_SWAP_I64:
9449 LoadMnemonic = PPC::LDARX;
9450 StoreMnemonic = PPC::STDCX;
9489 BuildMI(BB, dl, TII->
get(LoadMnemonic), dest)
9490 .addReg(ptrA).
addReg(ptrB);
9491 BuildMI(BB, dl, TII->
get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
9492 .addReg(oldval).
addReg(dest);
9515 }
else if (MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
9516 MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
9520 bool is64bit = Subtarget.
isPPC64();
9522 bool is8bit = MI.
getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
9545 : &PPC::GPRCRegClass;
9562 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
9595 if (ptrA != ZeroReg) {
9597 BuildMI(BB, dl, TII->
get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
9598 .addReg(ptrA).
addReg(ptrB);
9602 BuildMI(BB, dl, TII->
get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
9604 if (!isLittleEndian)
9605 BuildMI(BB, dl, TII->
get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
9606 .addReg(Shift1Reg).
addImm(is8bit ? 24 : 16);
9608 BuildMI(BB, dl, TII->
get(PPC::RLDICR), PtrReg)
9611 BuildMI(BB, dl, TII->
get(PPC::RLWINM), PtrReg)
9613 BuildMI(BB, dl, TII->
get(PPC::SLW), NewVal2Reg)
9614 .addReg(newval).
addReg(ShiftReg);
9615 BuildMI(BB, dl, TII->
get(PPC::SLW), OldVal2Reg)
9616 .addReg(oldval).
addReg(ShiftReg);
9618 BuildMI(BB, dl, TII->
get(PPC::LI), Mask2Reg).addImm(255);
9620 BuildMI(BB, dl, TII->
get(PPC::LI), Mask3Reg).addImm(0);
9621 BuildMI(BB, dl, TII->
get(PPC::ORI), Mask2Reg)
9622 .addReg(Mask3Reg).
addImm(65535);
9625 .addReg(Mask2Reg).
addReg(ShiftReg);
9627 .addReg(NewVal2Reg).
addReg(MaskReg);
9629 .addReg(OldVal2Reg).
addReg(MaskReg);
9632 BuildMI(BB, dl, TII->
get(PPC::LWARX), TmpDestReg)
9633 .addReg(ZeroReg).
addReg(PtrReg);
9635 .addReg(TmpDestReg).
addReg(MaskReg);
9636 BuildMI(BB, dl, TII->
get(PPC::CMPW), PPC::CR0)
9637 .addReg(TmpReg).
addReg(OldVal3Reg);
9645 .addReg(TmpDestReg).
addReg(MaskReg);
9647 .addReg(Tmp2Reg).
addReg(NewVal3Reg);
9648 BuildMI(BB, dl, TII->
get(PPC::STWCX)).addReg(Tmp4Reg)
9657 BuildMI(BB, dl, TII->
get(PPC::STWCX)).addReg(TmpDestReg)
9666 }
else if (MI.
getOpcode() == PPC::FADDrtz) {
9682 BuildMI(*BB, MI, dl, TII->
get(PPC::MTFSB1)).addImm(31);
9683 BuildMI(*BB, MI, dl, TII->
get(PPC::MTFSB0)).addImm(30);
9692 MI.
getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9693 MI.
getOpcode() == PPC::ANDIo_1_GT_BIT8) {
9694 unsigned Opcode = (MI.
getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9699 MI.
getOpcode() == PPC::ANDIo_1_EQ_BIT8);
9703 &PPC::GPRCRegClass :
9704 &PPC::G8RCRegClass);
9710 BuildMI(*BB, MI, dl, TII->
get(TargetOpcode::COPY),
9712 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
9713 }
else if (MI.
getOpcode() == PPC::TCHECK_RET) {
9717 BuildMI(*BB, MI, Dl, TII->
get(PPC::TCHECK), CRReg);
9736 int RefinementSteps = Subtarget.
hasRecipPrec() ? 1 : 3;
9739 return RefinementSteps;
9743 int Enabled,
int &RefinementSteps,
9744 bool &UseOneConstNR,
9745 bool Reciprocal)
const {
9748 (VT == MVT::f64 && Subtarget.
hasFRSQRTE()) ||
9752 (VT == MVT::v4f64 && Subtarget.
hasQPX())) {
9753 if (RefinementSteps == ReciprocalEstimate::Unspecified)
9756 UseOneConstNR =
true;
9764 int &RefinementSteps)
const {
9767 (VT == MVT::f64 && Subtarget.
hasFRE()) ||
9771 (VT == MVT::v4f64 && Subtarget.
hasQPX())) {
9772 if (RefinementSteps == ReciprocalEstimate::Unspecified)
9779 unsigned PPCTargetLowering::combineRepeatedFPDivisors()
const {
9807 Offset += cast<ConstantSDNode>(Loc.
getOperand(1))->getSExtValue();
9816 unsigned Bytes,
int Dist,
9826 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
9827 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
9830 if (FS != BFS || FS != (
int)Bytes)
return false;
9834 SDValue Base1 = Loc, Base2 = BaseLoc;
9835 int64_t Offset1 = 0, Offset2 = 0;
9838 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
9848 if (isGA1 && isGA2 && GV1 == GV2)
9849 return Offset1 == (Offset2 + Dist*Bytes);
9856 unsigned Bytes,
int Dist,
9859 EVT VT =
LS->getMemoryVT();
9866 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
9867 default:
return false;
9868 case Intrinsic::ppc_qpx_qvlfd:
9869 case Intrinsic::ppc_qpx_qvlfda:
9872 case Intrinsic::ppc_qpx_qvlfs:
9873 case Intrinsic::ppc_qpx_qvlfsa:
9876 case Intrinsic::ppc_qpx_qvlfcd:
9877 case Intrinsic::ppc_qpx_qvlfcda:
9880 case Intrinsic::ppc_qpx_qvlfcs:
9881 case Intrinsic::ppc_qpx_qvlfcsa:
9884 case Intrinsic::ppc_qpx_qvlfiwa:
9885 case Intrinsic::ppc_qpx_qvlfiwz:
9886 case Intrinsic::ppc_altivec_lvx:
9887 case Intrinsic::ppc_altivec_lvxl:
9888 case Intrinsic::ppc_vsx_lxvw4x:
9889 case Intrinsic::ppc_vsx_lxvw4x_be:
9892 case Intrinsic::ppc_vsx_lxvd2x:
9893 case Intrinsic::ppc_vsx_lxvd2x_be:
9896 case Intrinsic::ppc_altivec_lvebx:
9899 case Intrinsic::ppc_altivec_lvehx:
9902 case Intrinsic::ppc_altivec_lvewx:
9912 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
9913 default:
return false;
9914 case Intrinsic::ppc_qpx_qvstfd:
9915 case Intrinsic::ppc_qpx_qvstfda:
9918 case Intrinsic::ppc_qpx_qvstfs:
9919 case Intrinsic::ppc_qpx_qvstfsa:
9922 case Intrinsic::ppc_qpx_qvstfcd:
9923 case Intrinsic::ppc_qpx_qvstfcda:
9926 case Intrinsic::ppc_qpx_qvstfcs:
9927 case Intrinsic::ppc_qpx_qvstfcsa:
9930 case Intrinsic::ppc_qpx_qvstfiw:
9931 case Intrinsic::ppc_qpx_qvstfiwa:
9932 case Intrinsic::ppc_altivec_stvx:
9933 case Intrinsic::ppc_altivec_stvxl:
9934 case Intrinsic::ppc_vsx_stxvw4x:
9937 case Intrinsic::ppc_vsx_stxvd2x:
9940 case Intrinsic::ppc_vsx_stxvw4x_be:
9943 case Intrinsic::ppc_vsx_stxvd2x_be:
9946 case Intrinsic::ppc_altivec_stvebx:
9949 case Intrinsic::ppc_altivec_stvehx:
9952 case Intrinsic::ppc_altivec_stvewx:
9979 while (!
Queue.empty()) {
9981 if (!Visited.
insert(ChainNext).second)
9984 if (
MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
9988 if (!Visited.count(ChainLD->getChain().getNode()))
9989 Queue.push_back(ChainLD->getChain().getNode());
9991 for (
const SDUse &O : ChainNext->
ops())
9992 if (!Visited.count(O.getNode()))
9993 Queue.push_back(O.getNode());
9995 LoadRoots.
insert(ChainNext);
10007 IE = LoadRoots.end(); I !=
IE; ++
I) {
10008 Queue.push_back(*I);
10010 while (!
Queue.empty()) {
10012 if (!Visited.
insert(LoadRoot).second)
10015 if (
MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
10020 UE = LoadRoot->
use_end(); UI != UE; ++UI)
10021 if (((isa<MemSDNode>(*UI) &&
10022 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
10024 Queue.push_back(*UI);
10059 auto Final = Shifted;
10069 SDValue PPCTargetLowering::ConvertSETCCToSubtract(
SDNode *N,
10070 DAGCombinerInfo &DCI)
const {
10079 if (!DCI.isAfterLegalizeVectorOps())
10085 UE = N->
use_end(); UI != UE; ++UI) {
10093 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
10095 if (OpSize < Size) {
10112 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(
SDNode *N,
10113 DAGCombinerInfo &DCI)
const {
10158 APInt Op1Zero, Op1One;
10159 APInt Op2Zero, Op2One;
10168 if (Op1Zero != Op2Zero || Op1One != Op2One)
10203 for (
unsigned i = 0; i < 2; ++
i) {
10219 while (!BinOps.
empty()) {
10262 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++
i) {
10263 if (isa<ConstantSDNode>(Inputs[i]))
10267 UE = Inputs[
i].getNode()->use_end();
10270 if (User != N && !Visited.
count(User))
10289 for (
unsigned i = 0, ie = PromOps.
size(); i != ie; ++
i) {
10291 UE = PromOps[
i].getNode()->use_end();
10294 if (User != N && !Visited.
count(User))
10314 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++
i) {
10317 if (isa<ConstantSDNode>(Inputs[i]))
10323 std::list<HandleSDNode> PromOpHandles;
10324 for (
auto &PromOp : PromOps)
10325 PromOpHandles.emplace_back(PromOp);
10332 while (!PromOpHandles.empty()) {
10334 PromOpHandles.pop_back();
10340 if (!isa<ConstantSDNode>(PromOp.
getOperand(0)) &&
10343 PromOpHandles.emplace_front(PromOp);
10348 if (isa<ConstantSDNode>(RepValue))
10357 default: C = 0;
break;
10362 if ((!isa<ConstantSDNode>(PromOp.
getOperand(C)) &&
10364 (!isa<ConstantSDNode>(PromOp.
getOperand(C+1)) &&
10370 PromOpHandles.emplace_front(PromOp);
10378 for (
unsigned i = 0; i < 2; ++
i)
10379 if (isa<ConstantSDNode>(Ops[C+i]))
10395 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(
SDNode *N,
10396 DAGCombinerInfo &DCI)
const {
10435 while (!BinOps.
empty()) {
10442 PromOps.push_back(BinOp);
10475 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++
i) {
10476 if (isa<ConstantSDNode>(Inputs[i]))
10480 UE = Inputs[
i].getNode()->use_end();
10483 if (User != N && !Visited.count(User))
10490 SelectTruncOp[0].insert(std::make_pair(User,
10494 SelectTruncOp[0].insert(std::make_pair(User,
10497 SelectTruncOp[1].insert(std::make_pair(User,
10503 for (
unsigned i = 0, ie = PromOps.size(); i != ie; ++
i) {
10505 UE = PromOps[
i].getNode()->
use_end();
10508 if (User != N && !Visited.count(User))
10515 SelectTruncOp[0].insert(std::make_pair(User,
10519 SelectTruncOp[0].insert(std::make_pair(User,
10522 SelectTruncOp[1].insert(std::make_pair(User,
10529 bool ReallyNeedsExt =
false;
10533 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++
i) {
10534 if (isa<ConstantSDNode>(Inputs[i]))
10538 Inputs[
i].getOperand(0).getValueSizeInBits();
10539 assert(PromBits < OpBits &&
"Truncation not to a smaller bit count?");
10544 OpBits-PromBits))) ||
10547 (OpBits-(PromBits-1)))) {
10548 ReallyNeedsExt =
true;
10556 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++
i) {
10560 if (isa<ConstantSDNode>(Inputs[i]))
10563 SDValue InSrc = Inputs[
i].getOperand(0);
10577 std::list<HandleSDNode> PromOpHandles;
10578 for (
auto &PromOp : PromOps)
10579 PromOpHandles.emplace_back(PromOp);
10585 while (!PromOpHandles.empty()) {
10587 PromOpHandles.pop_back();
10591 default: C = 0;
break;
10596 if ((!isa<ConstantSDNode>(PromOp.
getOperand(C)) &&
10598 (!isa<ConstantSDNode>(PromOp.
getOperand(C+1)) &&
10604 PromOpHandles.emplace_front(PromOp);
10616 PromOpHandles.emplace_front(PromOp);
10625 for (
unsigned i = 0; i < 2; ++
i) {
10626 if (!isa<ConstantSDNode>(Ops[C+i]))
10643 auto SI0 = SelectTruncOp[0].
find(PromOp.
getNode());
10644 if (SI0 != SelectTruncOp[0].
end())
10646 auto SI1 = SelectTruncOp[1].
find(PromOp.
getNode());
10647 if (SI1 != SelectTruncOp[1].
end())
10656 if (!ReallyNeedsExt)
10668 "Invalid extension type");
10686 combineElementTruncationToVectorTruncation(
SDNode *N,
10687 DAGCombinerInfo &DCI)
const {
10689 "Should be called with a BUILD_VECTOR node");
10696 "The input operand must be an fp-to-int conversion.");
10705 bool IsSplat =
true;
10715 if (NextConversion != FirstConversion)
10737 DAG.getIntPtrConstant(1, dl));
10752 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
10753 return DAG.
getNode(Opcode, dl, TargetVT, BV);
10766 "Should be called with a BUILD_VECTOR node");
10769 bool InputsAreConsecutiveLoads =
true;
10770 bool InputsAreReverseConsecutive =
true;
10773 bool IsRoundOfExtLoad =
false;
10800 if (IsRoundOfExtLoad && LD2->getExtensionType() !=
ISD::EXTLOAD)
10804 InputsAreConsecutiveLoads =
false;
10806 InputsAreReverseConsecutive =
false;
10809 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
10813 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
10814 "The loads cannot be both consecutive and reverse consecutive.");
10817 IsRoundOfExtLoad ? FirstInput.
getOperand(0) : FirstInput;
10824 if (InputsAreConsecutiveLoads) {
10825 assert(LD1 &&
"Input needs to be a LoadSDNode.");
10830 if (InputsAreReverseConsecutive) {
10831 assert(LDL &&
"Input needs to be a LoadSDNode.");
10833 LDL->getBasePtr(), LDL->getPointerInfo(),
10834 LDL->getAlignment());
10845 SDValue PPCTargetLowering::DAGCombineBuildVector(
SDNode *N,
10846 DAGCombinerInfo &DCI)
const {
10848 "Should be called with a BUILD_VECTOR node");
10853 if (!Subtarget.
hasVSX())
10861 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
10894 if (!Ext1Op || !Ext2Op)
10902 int SecondElem = Ext2Op->getZExtValue();
10904 if (FirstElem == 0 && SecondElem == 1)
10906 else if (FirstElem == 2 && SecondElem == 3)
10915 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
10919 DAGCombinerInfo &DCI)
const {
10922 "Need an int -> FP conversion node here");
10933 (FirstOperand.getValueType() ==
MVT::i8 ||
10934 FirstOperand.getValueType() ==
MVT::i16);
10938 unsigned ConvOp = Signed ?
10944 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
10952 SDValue ExtOps[] = { Ld, WidthConst };
10954 return DAG.
getNode(ConvOp, dl, DstDouble ? MVT::f64 :
MVT::f32, Ext);
10973 "UINT_TO_FP is supported only with FPCVT");
10994 DCI.AddToWorklist(Src.
getNode());
11010 DCI.AddToWorklist(FP.
getNode());
11056 SDValue LoadOps[] = { Chain, Base };
11068 if (VecTy != MVT::v2f64) {
11130 SDValue StoreOps[] = { Chain, Swap, Base };
11133 StoreOps, VecTy, MMO);
11154 if (C->isNullValue() ||
11155 C->isAllOnesValue())
11162 return DAGCombineExtBoolTrunc(N, DCI);
11166 return DAGCombineTruncBoolExt(N, DCI);
11169 return combineFPToIntToFP(N, DCI);
11172 bool ValidTypeForStoreFltAsInt = (Op1VT ==
MVT::i32) ||
11176 if (Subtarget.
hasSTFIWX() && !cast<StoreSDNode>(
N)->isTruncatingStore() &&
11178 ValidTypeForStoreFltAsInt &&
11188 if (Op1VT == MVT::i32) {
11196 cast<StoreSDNode>(
N)->getMemoryVT(),
11197 cast<StoreSDNode>(
N)->getMemOperand());
11199 unsigned WidthInBytes =
11209 cast<StoreSDNode>(
N)->getMemoryVT(),
11210 cast<StoreSDNode>(
N)->getMemOperand());
11218 if (cast<StoreSDNode>(N)->isUnindexed() &&
11236 Ops, cast<StoreSDNode>(
N)->getMemoryVT(),
11237 cast<StoreSDNode>(
N)->getMemOperand());
11258 if (VT.isSimple()) {
11259 MVT LoadVT = VT.getSimpleVT();
11273 auto ReplaceTwoFloatLoad = [&]() {
11293 while (UI.getUse().getResNo() != 0) ++UI;
11295 while (UI.getUse().getResNo() != 0) ++UI;
11296 SDNode *RightShift = *UI;
11305 !isa<ConstantSDNode>(RightShift->
getOperand(1)) ||
11335 "Non-pre-inc AM on PPC?");
11368 if (ReplaceTwoFloatLoad())
11414 MVT PermCntlTy, PermTy, LDTy;
11416 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
11417 Intrinsic::ppc_altivec_lvsl;
11418 IntrLD = Intrinsic::ppc_altivec_lvx;
11419 IntrPerm = Intrinsic::ppc_altivec_vperm;
11424 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
11425 Intrinsic::ppc_qpx_qvlpcls;
11426 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
11427 Intrinsic::ppc_qpx_qvlfs;
11428 IntrPerm = Intrinsic::ppc_qpx_qvfperm;
11450 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
11454 BaseLoadOps, LDTy, BaseMMO);
11462 int IncOffset = VT.getSizeInBits() / 8;
11463 int IncValue = IncOffset;
11480 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
11484 ExtraLoadOps, LDTy, ExtraMMO);
11495 if (isLittleEndian)
11497 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
11500 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
11519 unsigned IID = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
11520 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
11521 : Intrinsic::ppc_altivec_lvsl);
11522 if ((IID == Intr ||
11523 IID == Intrinsic::ppc_qpx_qvlpcld ||
11524 IID == Intrinsic::ppc_qpx_qvlpcls) &&
11528 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
11539 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
11549 if (isa<ConstantSDNode>(Add->
getOperand(1))) {
11552 UE = BasePtr->
use_end(); UI != UE; ++UI) {
11553 if (UI->getOpcode() ==
ISD::ADD &&
11554 isa<ConstantSDNode>(UI->getOperand(1)) &&
11555 (cast<ConstantSDNode>(Add->
getOperand(1))->getZExtValue() -
11556 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
11557 (1ULL << Bits) == 0) {
11562 cast<ConstantSDNode>(
VI->getOperand(0))->getZExtValue() == IID) {
11577 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
11580 case Intrinsic::ppc_vsx_lxvw4x:
11581 case Intrinsic::ppc_vsx_lxvd2x:
11591 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
11594 case Intrinsic::ppc_vsx_stxvw4x:
11595 case Intrinsic::ppc_vsx_stxvd2x:
11650 SDNode *VCMPoNode =
nullptr;
11671 SDNode *FlagUser =
nullptr;
11673 FlagUser ==
nullptr; ++UI) {
11676 for (
unsigned i = 0, e = User->getNumOperands(); i != e; ++
i) {
11677 if (User->getOperand(i) ==
SDValue(VCMPoNode, 1)) {
11687 return SDValue(VCMPoNode, 0);
11696 cast<ConstantSDNode>(Cond.
getOperand(1))->getZExtValue() ==
11697 Intrinsic::ppc_is_decremented_ctr_nonzero) {
11703 "Counter decrement has more than one use");
11723 Intrinsic::ppc_is_decremented_ctr_nonzero &&
11729 cast<ConstantSDNode>(LHS.
getOperand(1))->getZExtValue() ==
11730 Intrinsic::ppc_is_decremented_ctr_nonzero &&
11731 isa<ConstantSDNode>(RHS)) {
11733 "Counter decrement comparison is not EQ or NE");
11735 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
11743 "Counter decrement has more than one use");
11755 assert(isDot &&
"Can't compare against a vector result!");
11759 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
11760 if (Val != 0 && Val != 1) {
11768 bool BranchOnWhenPredTrue = (CC ==
ISD::SETEQ) ^ (Val == 0);
11781 switch (cast<ConstantSDNode>(LHS.
getOperand(1))->getZExtValue()) {
11805 return DAGCombineBuildVector(N, DCI);
11814 std::vector<SDNode *> *Created)
const {
11819 if ((VT != MVT::i32 && VT !=
MVT::i64) ||
11820 !(Divisor.
isPowerOf2() || (-Divisor).isPowerOf2()))
11826 bool IsNegPow2 = (-Divisor).isPowerOf2();
11832 Created->push_back(Op.
getNode());
11837 Created->push_back(Op.
getNode());
11851 unsigned Depth)
const {
11858 KnownZero = 0xFFFF0000;
11862 switch (cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue()) {
11864 case Intrinsic::ppc_altivec_vcmpbfp_p:
11865 case Intrinsic::ppc_altivec_vcmpeqfp_p:
11866 case Intrinsic::ppc_altivec_vcmpequb_p:
11867 case Intrinsic::ppc_altivec_vcmpequh_p:
11868 case Intrinsic::ppc_altivec_vcmpequw_p:
11869 case Intrinsic::ppc_altivec_vcmpequd_p:
11870 case Intrinsic::ppc_altivec_vcmpgefp_p:
11871 case Intrinsic::ppc_altivec_vcmpgtfp_p:
11872 case Intrinsic::ppc_altivec_vcmpgtsb_p:
11873 case Intrinsic::ppc_altivec_vcmpgtsh_p:
11874 case Intrinsic::ppc_altivec_vcmpgtsw_p:
11875 case Intrinsic::ppc_altivec_vcmpgtsd_p:
11876 case Intrinsic::ppc_altivec_vcmpgtub_p:
11877 case Intrinsic::ppc_altivec_vcmpgtuh_p:
11878 case Intrinsic::ppc_altivec_vcmpgtuw_p:
11879 case Intrinsic::ppc_altivec_vcmpgtud_p:
11906 uint64_t LoopSize = 0;
11908 for (
auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
11914 if (LoopSize > 16 && LoopSize <= 32)
11928 if (Constraint.
size() == 1) {
11929 switch (Constraint[0]) {
11947 }
else if (Constraint ==
"wc") {
11949 }
else if (Constraint ==
"wa" || Constraint ==
"wd" ||
11950 Constraint ==
"wf" || Constraint ==
"ws") {
11966 if (!CallOperandVal)
11973 else if ((
StringRef(constraint) ==
"wa" ||
11981 switch (*constraint) {
12011 std::pair<unsigned, const TargetRegisterClass *>
12015 if (Constraint.
size() == 1) {
12017 switch (Constraint[0]) {
12020 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
12021 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
12024 return std::make_pair(0U, &PPC::G8RCRegClass);
12025 return std::make_pair(0U, &PPC::GPRCRegClass);
12031 if (VT ==
MVT::f32 || VT == MVT::i32)
12032 return std::make_pair(0U, &PPC::F4RCRegClass);
12033 if (VT == MVT::f64 || VT ==
MVT::i64)
12034 return std::make_pair(0U, &PPC::F8RCRegClass);
12035 if (VT == MVT::v4f64 && Subtarget.
hasQPX())
12036 return std::make_pair(0U, &PPC::QFRCRegClass);
12038 return std::make_pair(0U, &PPC::QSRCRegClass);
12041 if (VT == MVT::v4f64 && Subtarget.
hasQPX())
12042 return std::make_pair(0U, &PPC::QFRCRegClass);
12044 return std::make_pair(0U, &PPC::QSRCRegClass);
12046 return std::make_pair(0U, &PPC::VRRCRegClass);
12048 return std::make_pair(0U, &PPC::CRRCRegClass);
12050 }
else if (Constraint ==
"wc" && Subtarget.
useCRBits()) {
12052 return std::make_pair(0U, &PPC::CRBITRCRegClass);
12053 }
else if ((Constraint ==
"wa" || Constraint ==
"wd" ||
12054 Constraint ==
"wf") && Subtarget.
hasVSX()) {
12055 return std::make_pair(0U, &PPC::VSRCRegClass);
12056 }
else if (Constraint ==
"ws" && Subtarget.
hasVSX()) {
12058 return std::make_pair(0U, &PPC::VSSRCRegClass);
12060 return std::make_pair(0U, &PPC::VSFRCRegClass);
12063 std::pair<unsigned, const TargetRegisterClass *> R =
12073 PPC::GPRCRegClass.contains(R.first))
12075 PPC::sub_32, &PPC::G8RCRegClass),
12076 &PPC::G8RCRegClass);
12080 R.first = PPC::CR0;
12081 R.second = &PPC::CRRCRegClass;
12090 std::string &Constraint,
12091 std::vector<SDValue>&Ops,
12096 if (Constraint.length() > 1)
return;
12098 char Letter = Constraint[0];
12122 if (isShiftedUInt<16, 16>(Value))
12126 if (isShiftedInt<16, 16>(Value))
12155 Ops.push_back(Result);
12167 unsigned AS)
const {
12181 switch (AM.
Scale) {
12212 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
12218 bool isPPC64 = Subtarget.
isPPC64();
12222 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
12232 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
12240 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
12253 FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
12255 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
12269 bool isPPC64 = Subtarget.
isPPC64();
12272 if ((isPPC64 && VT !=
MVT::i64 && VT != MVT::i32) ||
12273 (!isPPC64 && VT != MVT::i32))
12278 .Case(
"r1", is64Bit ? PPC::X1 : PPC::R1)
12279 .
Case(
"r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
12280 .
Case(
"r13", (!isPPC64 && isDarwinABI) ? 0 :
12281 (is64Bit ? PPC::X13 : PPC::R13))
12297 unsigned Intrinsic)
const {
12299 switch (Intrinsic) {
12300 case Intrinsic::ppc_qpx_qvlfd:
12301 case Intrinsic::ppc_qpx_qvlfs:
12302 case Intrinsic::ppc_qpx_qvlfcd:
12303 case Intrinsic::ppc_qpx_qvlfcs:
12304 case Intrinsic::ppc_qpx_qvlfiwa:
12305 case Intrinsic::ppc_qpx_qvlfiwz:
12306 case Intrinsic::ppc_altivec_lvx:
12307 case Intrinsic::ppc_altivec_lvxl:
12308 case Intrinsic::ppc_altivec_lvebx:
12309 case Intrinsic::ppc_altivec_lvehx:
12310 case Intrinsic::ppc_altivec_lvewx:
12311 case Intrinsic::ppc_vsx_lxvd2x:
12312 case Intrinsic::ppc_vsx_lxvw4x: {
12314 switch (Intrinsic) {
12315 case Intrinsic::ppc_altivec_lvebx:
12318 case Intrinsic::ppc_altivec_lvehx:
12321 case Intrinsic::ppc_altivec_lvewx:
12324 case Intrinsic::ppc_vsx_lxvd2x:
12327 case Intrinsic::ppc_qpx_qvlfd:
12330 case Intrinsic::ppc_qpx_qvlfs:
12333 case Intrinsic::ppc_qpx_qvlfcd:
12336 case Intrinsic::ppc_qpx_qvlfcs:
12355 case Intrinsic::ppc_qpx_qvlfda:
12356 case Intrinsic::ppc_qpx_qvlfsa:
12357 case Intrinsic::ppc_qpx_qvlfcda:
12358 case Intrinsic::ppc_qpx_qvlfcsa:
12359 case Intrinsic::ppc_qpx_qvlfiwaa:
12360 case Intrinsic::ppc_qpx_qvlfiwza: {
12362 switch (Intrinsic) {
12363 case Intrinsic::ppc_qpx_qvlfda:
12366 case Intrinsic::ppc_qpx_qvlfsa:
12369 case Intrinsic::ppc_qpx_qvlfcda:
12372 case Intrinsic::ppc_qpx_qvlfcsa:
12391 case Intrinsic::ppc_qpx_qvstfd:
12392 case Intrinsic::ppc_qpx_qvstfs:
12393 case Intrinsic::ppc_qpx_qvstfcd:
12394 case Intrinsic::ppc_qpx_qvstfcs:
12395 case Intrinsic::ppc_qpx_qvstfiw:
12396 case Intrinsic::ppc_altivec_stvx:
12397 case Intrinsic::ppc_altivec_stvxl:
12398 case Intrinsic::ppc_altivec_stvebx:
12399 case Intrinsic::ppc_altivec_stvehx:
12400 case Intrinsic::ppc_altivec_stvewx:
12401 case Intrinsic::ppc_vsx_stxvd2x:
12402 case Intrinsic::ppc_vsx_stxvw4x: {
12404 switch (Intrinsic) {
12405 case Intrinsic::ppc_altivec_stvebx:
12408 case Intrinsic::ppc_altivec_stvehx:
12411 case Intrinsic::ppc_altivec_stvewx:
12414 case Intrinsic::ppc_vsx_stxvd2x:
12417 case Intrinsic::ppc_qpx_qvstfd:
12420 case Intrinsic::ppc_qpx_qvstfs:
12423 case Intrinsic::ppc_qpx_qvstfcd:
12426 case Intrinsic::ppc_qpx_qvstfcs:
12445 case Intrinsic::ppc_qpx_qvstfda:
12446 case Intrinsic::ppc_qpx_qvstfsa:
12447 case Intrinsic::ppc_qpx_qvstfcda:
12448 case Intrinsic::ppc_qpx_qvstfcsa:
12449 case Intrinsic::ppc_qpx_qvstfiwa: {
12451 switch (Intrinsic) {
12452 case Intrinsic::ppc_qpx_qvstfda:
12455 case Intrinsic::ppc_qpx_qvstfsa:
12458 case Intrinsic::ppc_qpx_qvstfcda:
12461 case Intrinsic::ppc_qpx_qvstfcsa:
12499 unsigned DstAlign,
unsigned SrcAlign,
12500 bool IsMemset,
bool ZeroMemset,
12507 if (Subtarget.
hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
12508 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
12516 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
12535 return !(BitSize == 0 || BitSize > 64);
12543 return NumBits1 == 64 && NumBits2 == 32;
12551 return NumBits1 == 64 && NumBits2 == 32;
12557 if (
LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
12590 bool *
Fast)
const {
12604 if (Subtarget.
hasVSX()) {
12645 static const MCPhysReg ScratchRegs[] = {
12646 PPC::X12, PPC::LR8, PPC::CTR8, 0
12649 return ScratchRegs;
12653 const Constant *PersonalityFn)
const {
12654 return Subtarget.
isPPC64() ? PPC::X3 : PPC::R3;
12658 const Constant *PersonalityFn)
const {
12664 EVT VT ,
unsigned DefinedValues)
const {
12690 if (!Subtarget.
isPPC64())
return;
12710 if (PPC::G8RCRegClass.
contains(*I))
12711 RC = &PPC::G8RCRegClass;
12712 else if (PPC::F8RCRegClass.
contains(*I))
12713 RC = &PPC::F8RCRegClass;
12714 else if (PPC::CRRCRegClass.
contains(*I))
12715 RC = &PPC::CRRCRegClass;
12716 else if (PPC::VRRCRegClass.
contains(*I))
12717 RC = &PPC::VRRCRegClass;
12728 Attribute::NoUnwind) &&
12729 "Function should be nounwind in insertCopiesSplitCSR!");
12735 for (
auto *Exit : Exits)
12737 TII->
get(TargetOpcode::COPY), *
I)
bool hasType(MVT vt) const
Return true if this TargetRegisterClass has the ValueType vt.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
static bool resideInSameSection(const Function *Caller, SDValue Callee, const TargetMachine &TM)
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
cl::opt< bool > ANDIGlueBug
X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
void setFrameAddressIsTaken(bool T)
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
unsigned getValueSizeInBits(unsigned ResNo) const
Returns MVT::getSizeInBits(getValueType(ResNo)).
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
void push_back(const T &Elt)
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
A parsed version of the target data layout string in and methods for querying it. ...
const_iterator end(StringRef path)
Get end iterator over path.
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
LinkageTypes getLinkage() const
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified amount.
FormattedString left_justify(StringRef Str, unsigned Width)
left_justify - append spaces after string so total output is Width characters.
Return with a flag operand, matched by 'blr'.
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
static Instruction * callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id)
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
void setVarArgsNumGPR(unsigned Num)
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool use64BitRegs() const
use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit registers in 32-bit mode when...
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
static bool hasSameArgumentList(const Function *CallerFn, ImmutableCallSite *CS)
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const override
Return the register ID of the name passed in.
VisibilityTypes getVisibility() const
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
Flags getFlags() const
Return the raw flags of the source value,.
unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
LLVMContext * getContext() const
static cl::opt< bool > DisableSCO("disable-ppc-sco", cl::desc("disable sibling call optimization on ppc"), cl::Hidden)
QVFPERM = This corresponds to the QPX qvfperm instruction.
uint64_t getZExtValue() const
Get zero extended value.
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
static cl::opt< bool > DisablePPCUnaligned("disable-ppc-unaligned", cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden)
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
SDValue getIndexedLoad(SDValue OrigLoad, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
STATISTIC(NumFunctions,"Total number of functions")
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG)
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
LocInfo getLocInfo() const
GPRC = address of GLOBAL_OFFSET_TABLE.
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
bool isLittleEndian() const
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
A Module instance is used to store all the information related to an LLVM module. ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
const TargetMachine & getTargetMachine() const
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
class llvm::RegisterBankInfo GPR
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, const SDLoc &dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
int getSplatIndex() const
void setLRStoreRequired()
const TargetMachine & getTarget() const
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
const TargetSubtargetInfo & getSubtarget() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
This class represents a function call, abstracting a target machine's calling convention.
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG)
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
const GlobalValue * getGlobal() const
QBRC, CHAIN = QVLFSb CHAIN, Ptr The 4xf32 load used for v4i1 constants.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
bool hasLazyResolverStub(const GlobalValue *GV) const
hasLazyResolverStub - Return true if accesses to the specified global have to go through a dyld lazy ...
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
static MVT getFloatingPointVT(unsigned BitWidth)
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Function Alias Analysis Results
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
unsigned getSizeInBits() const
static SDValue getTOCEntry(SelectionDAG &DAG, const SDLoc &dl, bool Is64Bit, SDValue GA)
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
unsigned getByValSize() const
bool hasP9Altivec() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
unsigned getNumOperands() const
Return the number of values used by this operation.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotAlignment - Calculates the alignment of this argument on the stack.
bool isDarwin() const
isDarwin - True if this is any darwin platform.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const Function * getParent() const
Return the enclosing method, or null if none.
unsigned getNumOperands() const
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
constexpr bool isInt< 16 >(int64_t x)
int64_t getOffset() const
const SDValue & getOperand(unsigned Num) const
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
unsigned getVarArgsNumGPR() const
CALL - A direct function call.
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT TVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
static BranchProbability getOne()
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
unsigned getValNo() const
const SDValue & getBasePtr() const
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned char TargetFlags=0)
void setVarArgsNumFPR(unsigned Num)
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
static void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
int getReturnAddrSaveIndex() const
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
return AArch64::GPR64RegClass.contains(Reg)
SDValue getExternalSymbol(const char *Sym, EVT VT)
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
unsigned arg_size() const
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, unsigned NumBytes)
EnsureStackAlignment - Round stack frame size up from NumBytes to ensure minimum alignment required f...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align=1, bool *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation...
bool isFPExtFree(EVT VT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
bool isVector() const
isVector - Return true if this is a vector value type.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
void setVarArgsStackOffset(int Offset)
A description of a memory reference used in the backend.
std::string getEVTString() const
getEVTString - This function returns value type as a string, e.g.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
struct fuzzer::@269 Flags
int64_t getOffset() const
const HexagonInstrInfo * TII
static Type * getFloatTy(LLVMContext &C)
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
Class to represent struct types.
static cl::opt< bool > DisablePPCPreinc("disable-ppc-preinc", cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden)
Base class for LoadSDNode and StoreSDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
MachineFunction & getMachineFunction() const
ValTy * getCalledValue() const
getCalledValue - Return the pointer to function that is being called.
unsigned getMinReservedArea() const
static void advance(T &it, size_t Val)
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign, unsigned MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
Base class for the full range of assembler expressions which are needed for parsing.
static Constant * get(ArrayRef< Constant * > V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
Optional< StringRef > getSectionPrefix() const
Get the section prefix for this function.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(const T &Value) const
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
int getVarArgsFrameIndex() const
Reg
All possible values of the reg field in the ModR/M byte.
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
MachinePointerInfo getWithOffset(int64_t O) const
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA)...
The memory access is dereferenceable (i.e., doesn't trap).
virtual bool isJumpTableRelative() const
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Direct move from a GPR to a VSX register (algebraic)
X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
int getMaskElt(unsigned Idx) const
bool isPositionIndependent() const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
QVALIGNI = This corresponds to the QPX qvaligni instruction.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_NODISCARD bool empty() const
bool getBoolValue() const
Convert APInt to a boolean value.
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT)
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
unsigned getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
unsigned getReturnSaveOffset() const
getReturnSaveOffset - Return the previous frame offset to save the return address.
Context object for machine code objects.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
static const unsigned PerfectShuffleTable[6561+1]
bool isInConsecutiveRegs() const
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch & Case(const char(&S)[N], const T &Value)
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
unsigned getLocReg() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned LHSStart, unsigned RHSStart)
isVMerge - Common function, used to match vmrg* shuffles.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
SDValue getRegisterMask(const uint32_t *RegMask)
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table...
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
G8RC = ADDIS_TLSGD_HA X2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
Class to represent array types.
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const TargetRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg...
MO_NLP_HIDDEN_FLAG - If this bit is set, the symbol reference is to a symbol with hidden visibility...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
SmallVector< ISD::OutputArg, 32 > Outs
StringRef getSection() const
Get the custom section of this global if it has one.
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
bool hasInvariantFunctionDescriptors() const
bool isLittleEndian() const
Layout endianness...
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
const SDValue & getBasePtr() const
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG)
Reduce the number of loads when building a vector.
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments, on Darwin.
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
getOptimalMemOpType - Returns the target specific optimal type for load and store operations as a res...
auto count(R &&Range, const E &Element) -> typename std::iterator_traits< decltype(std::begin(Range))>::difference_type
Wrapper function around std::count to count the number of times an element Element occurs in the give...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
EVT getMemoryVT() const
Return the type of the in-memory value.
bool isInConsecutiveRegsLast() const
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
int getFramePointerSaveIndex() const
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is 0.0 or -0.0.
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VMRGL* instruction with the ...
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base, int64_t &Offset, SelectionDAG &DAG)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
XXINSERT - The PPC VSX insert instruction.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
Maximum length of the test input libFuzzer tries to guess a good value based on the corpus and reports it always prefer smaller inputs during the corpus shuffle When libFuzzer itself reports a bug this exit code will be used If indicates the maximal total time in seconds to run the fuzzer minimizes the provided crash input Use with etc Experimental Use value profile to guide fuzzing Number of simultaneous worker processes to run the jobs If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset)
Stack pointer relative access.
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
This class is used to represent ISD::STORE nodes.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
uint32_t FloatToBits(float Float)
FloatToBits - This function takes a float and returns the bit equivalent 32-bit integer.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
TargetInstrInfo - Interface to description of machine instruction set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
void setTailCallSPDelta(int size)
static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget)
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
mmo_iterator memoperands_end() const
SDNode * getNode() const
get the SDNode which holds the desired result
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
MinAlign - A and B are either alignments or offsets.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getScalarSizeInBits() const
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
A switch()-like statement whose cases are string literals.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineInstrBuilder & setMemRefs(MachineInstr::mmo_iterator b, MachineInstr::mmo_iterator e) const
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
Control flow instructions. These all have token chains.
const SDValue & getBasePtr() const
unsigned const MachineRegisterInfo * MRI
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
FSEL - Traditional three-operand fsel node.
unsigned getFramePointerSaveOffset() const
getFramePointerSaveOffset - Return the previous frame offset to save the frame pointer.
CodeModel::Model getCodeModel() const
Returns the code model.
MVT - Machine Value Type.
LLVM Basic Block Representation.
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
Simple binary floating point operators.
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool isNonTemporal() const
C - The default llvm calling convention, compatible with C.
bool isVectorTy() const
True if this is an instance of VectorType.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static unsigned getSizeInBits(unsigned Reg, const MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI)
Get the size in bits of Reg.
This is an important base class in LLVM.
virtual unsigned getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
MO_NLP_FLAG - If this bit is set, the symbol reference is actually to the non_lazy_ptr for the global...
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
G8RC = ADDIS_DTPREL_HA X3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
bool isVector() const
isVector - Return true if this is a vector value type.
Direct move from a VSX register to a GPR.
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
static bool is64Bit(const char *name)
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
const Constant * getConstVal() const
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
const MachineOperand & getOperand(unsigned i) const
unsigned getVarArgsNumFPR() const
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall, unsigned ParamSize)
CalculateTailCallSPDiff - Get the amount the stack pointer has to be adjusted to accommodate the argu...
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
static ManagedStatic< OptionRegistry > OR
unsigned getLiveInVirtReg(unsigned PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual ...
ConstantFP - Floating Point Values [float, double].
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the specified isSplatShuffleMask...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
STFIWX - The STFIWX instruction.
POPCNTDKind hasPOPCNTD() const
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target...
FCFID - The FCFID instruction, taking an f64 operand and producing and f64 value containing the FP re...
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned getScalarValueSizeInBits() const
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
void AddToWorklist(SDNode *N)
const PPCFrameLowering * getFrameLowering() const override
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
constexpr bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two > 0 (64 bit edition...
static Type * getVoidTy(LLVMContext &C)
This class provides iterator support for SDUse operands that use a specific SDNode.
static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
CalculateTailCallArgDest - Remember Argument for later processing.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
bool any_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
static bool isIntS16Immediate(SDNode *N, short &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate...
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain, SDValue OldRetAddr, SDValue OldFP, int SPDiff, const SDLoc &dl)
EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to the appropriate stack sl...
unsigned getBitWidth() const
Return the number of bits in the APInt.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setVarArgsFrameIndex(int Index)
unsigned getOpcode() const
TRAP - Trapping instruction.
static const MCPhysReg QFPR[]
QFPR - The set of QPX registers that should be allocated for arguments.
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
static mvt_range vector_valuetypes()
static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot, const PPCSubtarget &Subtarget)
getVectorCompareInfo - Given an intrinsic, return false if it is not a vector comparison.
bool has64BitSupport() const
has64BitSupport - Return true if the selected CPU supports 64-bit instructions, regardless of whether...
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
G8RC = ADDIS_TLSLD_HA X2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
self_iterator getIterator()
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
QVESPLATI = This corresponds to the QPX qvesplati instruction.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
const SDValue & getValue() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned NoInfsFPMath
NoInfsFPMath - This flag is enabled when the -enable-no-infs-fp-math flag is specified on the command...
Common code between 32-bit and 64-bit PowerPC targets.
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
bool getFunctionSections() const
Return true if functions should be emitted into their own section, corresponding to -ffunction-sectio...
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1...
EVT - Extended Value Type.
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
std::vector< ArgListEntry > ArgListTy
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
void setMinReservedArea(unsigned size)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
This structure contains all information that is necessary for lowering calls.
bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc, or post-dec.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
This class contains a discriminated union of information about pointers in memory operands...
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
void setUseUnderscoreLongJmp(bool Val)
Indicate whether this target prefers to use _longjmp to implement llvm.longjmp or the version without...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
User::const_op_iterator arg_iterator
arg_iterator - The type of iterator to use when looping over actual arguments at this call site...
Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
const BlockAddress * getBlockAddress() const
bool hasPartwordAtomics() const
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DstReg, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true...
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
const uint32_t * getNoPreservedMask() const override
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
bool isTargetLinux() const
bool hasRecipPrec() const
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
static bool isReleaseOrStronger(AtomicOrdering ao)
const MachinePointerInfo & getPointerInfo() const
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
unsigned getByValAlign() const
The memory access writes data.
const SDValue & getOffset() const
unsigned getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only...
bool needsSwapsForVSXMemOps() const
Extract a subvector from unsigned integer vector and convert to FP.
ArrayRef< int > getMask() const
static unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, SDValue CallSeqStart, const SDLoc &dl, int SPDiff, bool isTailCall, bool isPatchPoint, bool hasNest, SmallVectorImpl< std::pair< unsigned, SDValue >> &RegsToPass, SmallVectorImpl< SDValue > &Ops, std::vector< EVT > &NodeTys, ImmutableCallSite *CS, const PPCSubtarget &Subtarget)
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
int getVarArgsStackOffset() const
QBFLT = Access the underlying QPX floating-point boolean representation.
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef...
const PPCRegisterInfo * getRegisterInfo() const override
bool hasP8Altivec() const
Iterator for intrusive lists based on ilist_node.
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize, unsigned LinkageSize, unsigned ParamAreaSize, unsigned &ArgOffset, unsigned &AvailableFPRs, unsigned &AvailableVRs, bool HasQPX)
CalculateStackSlotUsed - Return whether this argument will use its stack slot (instead of being passe...
CCState - This class holds information needed while lowering arguments and return values...
X3 = GET_TLSLD_ADDR X3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
bool hasDirectMove() const
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
Returns true if the target can instruction select the specified FP immediate natively.
PICLevel::Level getPICLevel() const
Returns the PIC level (small or large model)
void setNode(SDNode *N)
set the SDNode
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side...
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, std::vector< SDNode * > *Created) const override
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators...
static bool needStackSlotPassParameters(const PPCSubtarget &Subtarget, const SmallVectorImpl< ISD::OutputArg > &Outs)
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
bool isJumpTableRelative() const override
shadow stack gc Shadow Stack GC Lowering
void setIsSplitCSR(bool s)
XXSPLT - The PPC VSX splat instructions.
bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
VECSHL - The PPC VSX shift left instruction.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and rounds it to a floating point val...
const PPCInstrInfo * getInstrInfo() const override
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool useSoftFloat() const
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified addressed, force it to be represented as an indexed [r+...
Type * getType() const
All values are typed, get the type of this value.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Provides information about what library functions are available for the current target.
bool isVector(MCInstrInfo const &MCII, MCInst const &MCI)
CCValAssign - Represent assignment of one arg/retval to a location.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BRCOND - Conditional branch.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotSize - Calculates the size reserved for this argument on the stack.
static const char * Target
const SDValue & getChain() const
Byte Swap and Counting operators.
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, SelectionDAG &DAG, const SDLoc &dl)
BuildSplatI - Build a canonical splati of Val with an element size of SplatSize.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, const SDLoc &dl, EVT DestVT=MVT::Other)
BuildIntrinsicOp - Return a unary operator intrinsic node with the specified intrinsic ID...
CHAIN = SC CHAIN, Imm128 - System call.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
This is an abstract virtual class for memory operations.
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName() - This method returns the name of a target specific DAG node. ...
BasicBlock * GetInsertBlock() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always beneficiates from combining into FMA for a given value type...
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
X3 = GET_TLS_ADDR X3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.
static SDNode * isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG)
isBLACompatibleAddress - Return the immediate to use if the specified 32-bit value is representable ...
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset...
bool isDereferenceable() const
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in...
VPERM - The PPC VPERM Instruction.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
STXSIX - The STXSI[bh]X instruction.
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after execu...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, const SDLoc &DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
G8RC = ADDIS_GOT_TPREL_HA X2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Class to represent vector types.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SynchronizationScope SynchScope=CrossThread, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
QVGPCI = This corresponds to the QPX qvgpci instruction.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
Return a mask of call-preserved registers for the given calling convention on the current function...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V, bool HasDirectMove)
Do we have an efficient pattern in a .td file for this node?
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
op_iterator op_begin() const
bool isIntegerTy() const
True if this is an instance of IntegerType.
unsigned EmulatedTLS
EmulatedTLS - This flag enables emulated TLS model, using emutls function in the runtime library...
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
static use_iterator use_end()
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
ANY_EXTEND - Used for integer types. The high bits are undefined.
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegReg - Given the specified addressed, check to see if it can be represented as an inde...
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2...
static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement, bool Swap, SDLoc &DL, SelectionDAG &DAG)
This function is called when we have proved that a SETCC node can be replaced by subtraction (and oth...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
bool enableMachineScheduler() const override
static bool isAcquireOrStronger(AtomicOrdering ao)
GPRC = address of GLOBAL_OFFSET_TABLE.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase, but as an MCExpr.
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
block_iterator block_end() const
BR_JT - Jumptable branch.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack...
These are IR-level optimization flags that may be propagated to SDNodes.
Represents a use of a SDNode.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
SmallVector< SDValue, 32 > OutVals
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry...
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
Bitwise operators - logical and, logical or, logical xor.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Reciprocal estimate instructions (unary FP ops).
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
bool useLongCalls() const
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
ImmutableCallSite - establish a view to a call site for examination.
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
void clearBit(unsigned bitPosition)
Set a given bit to 0.
static MachineOperand CreateImm(int64_t Val)
ArrayRef< SDUse > ops() const
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
Direct move from a GPR to a VSX register (zero)
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The CMPB instruction (takes two operands of i32 or i64).
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
The memory access always returns the same value (or traps).
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
iterator find(const KeyT &Val)
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction. ...
op_iterator op_end() const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
const SDValue & getOffset() const
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
static void setUsesTOCBasePtr(MachineFunction &MF)
TC_RETURN - A tail call return.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
FSINCOS - Compute both fsin and fcos as a single operation.
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
constexpr bool isUInt< 16 >(uint64_t x)
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
static bool isConstantOrUndef(int Op, int Val)
isConstantOrUndef - Op is either an undef node or a ConstantSDNode.
static bool isSplat(ArrayRef< Value * > VL)
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, const AllocaInst *Alloca=nullptr)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
static bool isFunctionGlobalAddress(SDValue Callee)
bool useCRBits() const
useCRBits - Return true if we should store and manipulate i1 values in the individual condition regis...
EVT getValueType() const
Return the ValueType of the referenced return value.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
SDValue getIndexedStore(SDValue OrigStore, const SDLoc &dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
unsigned getReg() const
getReg - Returns the register number.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
unsigned getAlignment() const
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, bool Aligned) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVectorImpl< TailCallArgumentInfo > &TailCallArgs, SmallVectorImpl< SDValue > &MemOpChains, const SDLoc &dl)
StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
SDValue getRegister(unsigned Reg, EVT VT)
void setUseUnderscoreSetJmp(bool Val)
Indicate whether this target prefers to use _setjmp to implement llvm.setjmp or the version without _...
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2...
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
const ArgumentListType & getArgumentList() const
Get the underlying elements of the Function...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
SDValue getValueType(EVT)
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
PREFETCH - This corresponds to a prefetch intrinsic.
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
These nodes represent the 32-bit PPC shifts that operate on 6-bit shift amounts.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
unsigned getDarwinDirective() const
getDarwinDirective - Returns the -m directive specified for the cpu.
uint64_t getSize() const
Return the size in bytes of the memory reference.
const TargetLowering & getTargetLoweringInfo() const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
Primary interface to the complete machine description for the target machine.
block_iterator block_begin() const
static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV=nullptr)
Return true if we should reference labels using a PICBase, set the HiOpFlags and LoOpFlags to the tar...
StringRef - Represent a constant reference to a string, i.e.
APInt zext(unsigned width) const
Zero extend to a new width.
SetCC operator - This evaluates to a true value iff the condition is true.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of stores operations that may be substituted for the call to memset, used for functions with OptSize attribute.
static BranchProbability getZero()
MachineBasicBlock * EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getLocMemOffset() const
Extract a subvector from signed integer vector and convert to FP.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
TRUNCATE - Completely drop the high bits.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
unsigned getLinkageSize() const
getLinkageSize - Return the size of the PowerPC ABI linkage area.
unsigned getAlignment() const
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
On a symbol operand "FOO", this indicates that the reference is actually to "FOO@plt".
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain...
unsigned AllocateStack(unsigned Size, unsigned Align)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
Fast - This calling convention attempts to make calls as fast as possible (e.g.
RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the altivec VCMP*o instructions.
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
EVT changeVectorElementTypeToInteger() const
changeVectorElementTypeToInteger - Return a vector with the same number of elements as this vector...
MO_PIC_FLAG - If this bit is set, the symbol reference is relative to the function's picbase...
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VRGH* instruction with the ...
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, const SDLoc &dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
StringRef getSection() const
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
uint64_t getZExtValue() const
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
int isQVALIGNIShuffleMask(SDNode *N)
If this is a qvaligni shuffle mask, return the shift amount, otherwise return -1. ...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
bool useSoftFloat() const override