70 bool isPPC64 = Subtarget.
isPPC64();
105 if (isPPC64 || Subtarget.
hasFPCVT()) {
// NOTE(review): non-contiguous excerpt — several original lines are missing
// between the numbered statements below; do not read this as a complete
// function. It appears to be the byval-alignment walker: it raises MaxAlign
// toward the MaxMaxAlign cap based on the widest vector type found while
// recursing through vector/array/struct element types — TODO confirm against
// the full source.
929 unsigned MaxMaxAlign) {
// Already at the cap — nothing more to learn.
930 if (MaxAlign == MaxMaxAlign)
932 if (
VectorType *VTy = dyn_cast<VectorType>(Ty)) {
// Vectors of >=256 bits want 32-byte alignment (when the cap allows it);
// >=128-bit vectors want at least 16.
933 if (MaxMaxAlign >= 32 && VTy->getBitWidth() >= 256)
935 else if (VTy->getBitWidth() >= 128 && MaxAlign < 16)
937 }
else if (
ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
// Recurse into the array element type (the recursive call itself is not
// in this excerpt).
938 unsigned EltAlign = 0;
940 if (EltAlign > MaxAlign)
942 }
else if (
StructType *STy = dyn_cast<StructType>(Ty)) {
// Recurse into each struct member, stopping early once the cap is hit.
943 for (
unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
944 unsigned EltAlign = 0;
946 if (EltAlign > MaxAlign)
948 if (MaxAlign == MaxMaxAlign)
// NOTE(review): two unrelated fragments. Lines 1083-1088 look like a
// floating-point-zero test: true when the operand (possibly loaded from a
// constant pool) is an FP zero — isZero() matches both +0.0 and -0.0.
// Line 1096 looks like a shuffle-mask element check: an element matches when
// it is undef (encoded as a negative index) or equals the expected value.
1083 return CFP->getValueAPF().isZero();
1087 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
CP->getConstVal()))
1088 return CFP->getValueAPF().isZero();
1096 return Op < 0 || Op == Val;
// NOTE(review): fragment of a byte-granularity pack (vpkuhum-style)
// shuffle-mask check; the per-element comparisons inside the loops are
// missing from this excerpt. ShuffleKind appears to follow the upstream
// convention 0 = normal (two inputs), 1 = unary (both inputs the same),
// 2 = swapped inputs — TODO confirm against the full source.
1108 if (ShuffleKind == 0) {
1111 for (
unsigned i = 0; i != 16; ++i)
1114 }
else if (ShuffleKind == 2) {
1117 for (
unsigned i = 0; i != 16; ++i)
1120 }
else if (ShuffleKind == 1) {
// Unary form: either input may supply the bytes, so the starting byte
// offset depends on endianness.
1121 unsigned j = IsLE ? 0 : 1;
1122 for (
unsigned i = 0; i != 8; ++i)
// NOTE(review): fragment of the halfword-granularity pack (vpkuwum-style)
// mask check — same shape as the byte variant but stepping two bytes per
// element; loop bodies are missing from this excerpt. ShuffleKind
// convention presumed: 0 = normal, 1 = unary, 2 = swapped (confirm).
1139 if (ShuffleKind == 0) {
1142 for (
unsigned i = 0; i != 16; i += 2)
1146 }
else if (ShuffleKind == 2) {
1149 for (
unsigned i = 0; i != 16; i += 2)
1153 }
else if (ShuffleKind == 1) {
// Unary form: endianness picks which half of the doubled input to track.
1154 unsigned j = IsLE ? 0 : 2;
1155 for (
unsigned i = 0; i != 8; i += 2)
// NOTE(review): fragment of the word-granularity pack (vpkudum-style) mask
// check — four bytes per element; loop bodies are missing from this
// excerpt. ShuffleKind convention presumed: 0 = normal, 1 = unary,
// 2 = swapped (confirm against full source).
1181 if (ShuffleKind == 0) {
1184 for (
unsigned i = 0; i != 16; i += 4)
1190 }
else if (ShuffleKind == 2) {
1193 for (
unsigned i = 0; i != 16; i += 4)
1199 }
else if (ShuffleKind == 1) {
// Unary form: endianness picks the starting word offset.
1200 unsigned j = IsLE ? 0 : 4;
1201 for (
unsigned i = 0; i != 8; i += 4)
// NOTE(review): fragment of the shared isVMerge helper used by the
// vmrgl/vmrgh mask checks. UnitSize is the merge element width in bytes
// (asserted 1, 2, or 4); LHSStart/RHSStart are the first mask indices
// expected from each source vector. The element-comparison calls inside
// the loop are only partially present in this excerpt.
1218 unsigned LHSStart,
unsigned RHSStart) {
1221 assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1222 "Unsupported merge size!");
// 8/UnitSize interleaved groups of UnitSize bytes each (half of a
// 16-byte vector from each input).
1224 for (
unsigned i = 0; i != 8/UnitSize; ++i)
1225 for (
unsigned j = 0; j != UnitSize; ++j) {
1227 LHSStart+j+i*UnitSize) ||
1229 RHSStart+j+i*UnitSize))
// NOTE(review): fragments of the vmrgl- and vmrgh-style mask checks. Each
// dispatches to isVMerge with start offsets encoding which vector half is
// merged (0 = low-index half, 8 = high-index half) and whether operands are
// swapped (RHSStart 16/24 selects the second source). The surrounding
// endianness branches are not part of this excerpt; ShuffleKind presumed as
// elsewhere: 0 = normal, 1 = unary, 2 = swapped (confirm).
1244 if (ShuffleKind == 1)
1245 return isVMerge(N, UnitSize, 0, 0);
1246 else if (ShuffleKind == 2)
1247 return isVMerge(N, UnitSize, 0, 16);
1251 if (ShuffleKind == 1)
1252 return isVMerge(N, UnitSize, 8, 8);
1253 else if (ShuffleKind == 0)
1254 return isVMerge(N, UnitSize, 8, 24);
1269 if (ShuffleKind == 1)
1270 return isVMerge(N, UnitSize, 8, 8);
1271 else if (ShuffleKind == 2)
1272 return isVMerge(N, UnitSize, 8, 24);
1276 if (ShuffleKind == 1)
1277 return isVMerge(N, UnitSize, 0, 0);
1278 else if (ShuffleKind == 0)
1279 return isVMerge(N, UnitSize, 0, 16);
// NOTE(review): fragments of a word-granularity merge helper (of the
// vmrgew/vmrgow family) and its even/odd mask-check callers. indexOffset
// selects even (4) or odd (0) words — or the reverse — on two endianness
// paths whose guards are missing from this excerpt; RHSStartValue
// distinguishes normal (0) from swapped (16) second operands.
1328 unsigned RHSStartValue) {
// Two halves of the 16-byte vector, four bytes per word.
1332 for (
unsigned i = 0; i < 2; ++i)
1333 for (
unsigned j = 0; j < 4; ++j)
1335 i*RHSStartValue+j+IndexOffset) ||
1337 i*RHSStartValue+j+IndexOffset+8))
1359 unsigned indexOffset = CheckEven ? 4 : 0;
1360 if (ShuffleKind == 1)
1361 return isVMerge(N, indexOffset, 0);
1362 else if (ShuffleKind == 2)
1363 return isVMerge(N, indexOffset, 16);
1368 unsigned indexOffset = CheckEven ? 0 : 4;
1369 if (ShuffleKind == 1)
1370 return isVMerge(N, indexOffset, 0);
1371 else if (ShuffleKind == 0)
1372 return isVMerge(N, indexOffset, 16);
// NOTE(review): fragment of the vsldoi (byte-rotate) mask check: skip
// leading undef mask entries, derive the shift amount from the first
// defined entry, then verify the remaining entries continue the rotated
// pattern (the per-entry checks inside the loops are missing from this
// excerpt). The last line flips the amount for the swapped/little-endian
// case.
1394 for (i = 0; i != 16 && SVOp->
getMaskElt(i) < 0; ++i)
// Entirely-undef mask: no meaningful shift amount can be derived.
1397 if (i == 16)
return -1;
1402 if (ShiftAmt < i)
return -1;
1407 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1409 for (++i; i != 16; ++i)
1412 }
else if (ShuffleKind == 1) {
1414 for (++i; i != 16; ++i)
1421 ShiftAmt = 16 - ShiftAmt;
// NOTE(review): fragments of the EltSize-byte splat mask check over a
// 16-byte vector and, at line 1468, of the splat-immediate computation,
// which converts the first mask element into an endian-adjusted lane index.
// Interior checks are missing from this excerpt.
1431 (EltSize == 1 || EltSize == 2 || EltSize == 4));
// The splatted element must come entirely from the first input vector
// (mask indices 0..15).
1443 if (ElementBase >= 16)
1448 for (
unsigned i = 1; i != EltSize; ++i)
1452 for (
unsigned i = EltSize, e = 16; i != e; i += EltSize) {
1454 for (
unsigned j = 0; j != EltSize; ++j)
1468 return (16 / EltSize) - 1 - (SVOp->
getMaskElt(0) / EltSize);
// NOTE(review): fragment of the splat-immediate extractor's handling of a
// BUILD_VECTOR whose element size is smaller than the requested splat unit:
// consecutive elements are collected into UniquedVals (one slot per
// position within the larger unit) and must all agree. Several lines,
// including the early returns, are missing from this excerpt.
1485 if (EltSize < ByteSize) {
1486 unsigned Multiple = ByteSize/EltSize;
1488 assert(Multiple > 1 && Multiple <= 4 &&
"How can this happen?");
// Record each element into its slot, failing on any mismatch (the
// enclosing loop header is not in the excerpt).
1497 if (!UniquedVals[i&(Multiple-1)].getNode())
1498 UniquedVals[i&(Multiple-1)] = N->
getOperand(i);
1499 else if (UniquedVals[i&(Multiple-1)] != N->
getOperand(i))
// Special cases: all-leading-zero slots (splat of a small positive
// value) and all-leading-one slots (splat of a small negative value);
// undef slots are permissive for either.
1509 bool LeadingZero =
true;
1510 bool LeadingOnes =
true;
1511 for (
unsigned i = 0; i != Multiple-1; ++i) {
1512 if (!UniquedVals[i].getNode())
continue;
1514 LeadingZero &= cast<ConstantSDNode>(UniquedVals[i])->isNullValue();
1515 LeadingOnes &= cast<ConstantSDNode>(UniquedVals[i])->isAllOnesValue();
1519 if (!UniquedVals[Multiple-1].getNode())
1521 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
1526 if (!UniquedVals[Multiple-1].getNode())
1528 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
// NOTE(review): later fragment of the splat-immediate extractor: pull the
// constant bits of the candidate element (integer, or f32 via its raw
// bits), verify the value really is a ByteSize-byte splat, and accept only
// values that fit a 5-bit signed immediate.
1547 unsigned ValSizeInBytes = EltSize;
1550 Value = CN->getZExtValue();
// f32 is asserted to be the only legal FP vector element type here.
1552 assert(CN->getValueType(0) ==
MVT::f32 &&
"Only one legal FP vector type!");
1553 Value =
FloatToBits(CN->getValueAPF().convertToFloat());
// A narrower source value cannot define a wider splat unit.
1559 if (ValSizeInBytes < ByteSize)
return SDValue();
1563 if (!
APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
// Zero is rejected here — presumably handled by a cheaper path elsewhere
// (confirm against the full source).
1570 if (MaskVal == 0)
return SDValue();
// Must fit the 5-bit signed immediate field of the splat instructions.
1573 if (SignExtend32<5>(MaskVal) == MaskVal)
// NOTE(review): fragment of a 4-lane shuffle alignment check (presumably
// the QPX qvaligni mask helper — confirm against the full source). Same
// shape as the 16-byte vsldoi check but over 4 elements: skip leading
// undefs, derive the shift amount, fail on inconsistency.
1589 for (i = 0; i != 4 && SVOp->
getMaskElt(i) < 0; ++i)
1592 if (i == 4)
return -1;
1597 if (ShiftAmt < i)
return -1;
1601 for (++i; i != 4; ++i)
// NOTE(review): fragments of the signed-16-bit-immediate predicates: decide
// whether a constant node survives truncation to i16, with separate 32-bit
// (line 1622) and 64-bit (line 1624) round-trip checks.
1617 if (!isa<ConstantSDNode>(N))
1620 Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
1622 return Imm == (int32_t)cast<ConstantSDNode>(
N)->getZExtValue();
1624 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
// NOTE(review): fragment of reg+reg address selection. The known-bits
// queries feeding these variables are partially missing; the final test is
// the classic carry-free check — if every bit position is known zero in at
// least one operand, OR-ing them cannot carry, so the OR is equivalent to
// ADD and can be split into base + index registers.
1654 APInt LHSKnownZero, LHSKnownOne;
1655 APInt RHSKnownZero, RHSKnownOne;
1657 LHSKnownZero, LHSKnownOne);
1661 RHSKnownZero, RHSKnownOne);
1664 if (~(LHSKnownZero | RHSKnownZero) == 0) {
// NOTE(review): fragments of displacement-form (reg + 16-bit immediate)
// address selection. The recurring "(imm & 3) == 0" guards look like the
// 4-byte-multiple requirement of DS-form encodings when Aligned is set —
// confirm. The final lines handle a bare constant address that fits in
// 32 bits.
1722 bool Aligned)
const {
1732 (!Aligned || (imm & 3) == 0)) {
1744 &&
"Cannot handle constant offsets yet!");
1756 (!Aligned || (imm & 3) == 0)) {
// If the bits covered by the immediate are already known zero in the
// operand, OR behaves like ADD and the operand can serve as the base.
1760 APInt LHSKnownZero, LHSKnownOne;
1763 if ((LHSKnownZero.
getZExtValue()|~(uint64_t)imm) == ~0ULL) {
1767 dyn_cast<FrameIndexSDNode>(N.
getOperand(0))) {
1786 CN->getValueType(0));
// A constant address is usable directly when it fits in 32 bits (and
// meets the alignment requirement): high bits get materialized, the low
// 16 fold into the displacement.
1791 if ((CN->getValueType(0) ==
MVT::i32 ||
1792 (int64_t)CN->getZExtValue() == (
int)CN->getZExtValue()) &&
1793 (!Aligned || (CN->getZExtValue() & 3) == 0)) {
1794 int Addr = (
int)CN->getZExtValue();
// NOTE(review): fragments of pre-indexed (update-form) load/store
// selection: the pointer, memory VT, and alignment are pulled from either a
// LoadSDNode or a StoreSDNode, then unsupported cases are rejected — the
// surrounding conditions are largely missing from this excerpt.
1857 Ptr =
LD->getBasePtr();
1858 VT =
LD->getMemoryVT();
1859 Alignment =
LD->getAlignment();
1861 Ptr =
ST->getBasePtr();
1862 VT =
ST->getMemoryVT();
1863 Alignment =
ST->getAlignment();
// Frame-index or fixed-register bases cannot take the update form here
// (guard's consequent not in excerpt).
1887 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
1890 SDValue Val = cast<StoreSDNode>(
N)->getValue();
1920 isa<ConstantSDNode>(Offset))
1936 unsigned &HiOpFlags,
unsigned &LoOpFlags,
1955 if (GV->hasHiddenVisibility()) {
2019 unsigned MOHiFlag, MOLoFlag;
2048 unsigned MOHiFlag, MOLoFlag;
2077 unsigned MOHiFlag, MOLoFlag;
2096 bool is64bit = Subtarget.
isPPC64();
2122 PtrVT, GOTReg, TGA);
2126 PtrVT, TGA, GOTPtr);
2163 PtrVT, GOTPtr, TGA, TGA);
2165 PtrVT, TLSAddr, TGA);
2187 unsigned MOHiFlag, MOLoFlag;
2209 false,
false,
false, 0);
2245 if (VT.
bitsLT(MVT::i32)) {
2258 if (C->isAllOnesValue() || C->isNullValue())
2284 const Value *SV = cast<SrcValueSDNode>(Node->
getOperand(2))->getValue();
2287 assert(!Subtarget.
isPPC64() &&
"LowerVAARG is PPC32 only");
2292 false,
false,
false, 0);
2315 false,
false,
false, 0);
2325 SDValue OverflowArea = DAG.
getLoad(MVT::i32, dl, InChain, OverflowAreaPtr,
2328 InChain = OverflowArea.
getValue(1);
2330 SDValue RegSaveArea = DAG.
getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr,
2379 MVT::i32,
false,
false, 0);
2382 false,
false,
false, 0);
2387 assert(!Subtarget.
isPPC64() &&
"LowerVACOPY is PPC32 only");
2411 bool isPPC64 = (PtrVT ==
MVT::i64);
2417 Entry.
Ty = IntPtrTy;
2418 Entry.
Node = Trmp; Args.push_back(Entry);
2423 Args.push_back(Entry);
2425 Entry.
Node = FPtr; Args.push_back(Entry);
2426 Entry.
Node = Nest; Args.push_back(Entry);
2430 CLI.setDebugLoc(dl).setChain(Chain)
2433 std::move(Args), 0);
2435 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
2436 return CallResult.second;
2498 uint64_t FPROffset = 1;
2508 uint64_t nextOffset = FPROffset;
2517 nextOffset += StackOffset;
2518 nextPtr = DAG.
getNode(
ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
2522 DAG.
getStore(secondStore, dl, StackOffsetFI, nextPtr,
2525 nextOffset += FrameOffset;
2526 nextPtr = DAG.
getNode(
ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
2529 return DAG.
getStore(thirdStore, dl, FR, nextPtr,
2535 #include "PPCGenCallingConv.inc"
// Selects the fast-isel calling-convention checker: a nonzero Flag asks for
// the argument-assignment function, zero for the return-value function.
// NOTE(review): the closing brace of this function lies outside the
// excerpt — the definition is incomplete as shown.
2539 CCAssignFn *PPCTargetLowering::useFastISelCCs(
unsigned Flag)
const {
2540 return Flag ? CC_PPC64_ELF_FIS : RetCC_PPC64_ELF_FIS;
2557 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2567 if (RegNum != NumArgRegs && RegNum % 2 == 1) {
2583 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2593 if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
2606 static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
2607 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
2608 PPC::F11, PPC::F12, PPC::F13};
2612 PPC::QF1, PPC::QF2, PPC::QF3, PPC::QF4, PPC::QF5, PPC::QF6, PPC::QF7,
2613 PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
2618 unsigned PtrByteSize) {
2626 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2635 unsigned PtrByteSize) {
2636 unsigned Align = PtrByteSize;
2652 if (BVAlign > PtrByteSize) {
2653 if (BVAlign % PtrByteSize != 0)
2655 "ByVal alignment is not a multiple of the pointer size");
2681 unsigned PtrByteSize,
2682 unsigned LinkageSize,
2683 unsigned ParamAreaSize,
2684 unsigned &ArgOffset,
2685 unsigned &AvailableFPRs,
2686 unsigned &AvailableVRs,
bool HasQPX) {
2687 bool UseMemory =
false;
2692 ArgOffset = ((ArgOffset + Align - 1) / Align) *
Align;
2695 if (ArgOffset >= LinkageSize + ParamAreaSize)
2701 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
2704 if (ArgOffset > LinkageSize + ParamAreaSize)
2715 if (AvailableFPRs > 0) {
2723 if (AvailableVRs > 0) {
2735 unsigned NumBytes) {
2737 unsigned AlignMask = TargetAlign - 1;
2738 NumBytes = (NumBytes + AlignMask) & ~AlignMask;
// NOTE(review): fragment of the formal-argument lowering dispatcher — it
// forwards to the 64-bit SVR4, 32-bit SVR4, or Darwin implementation; the
// ABI tests that select between them are missing from this excerpt, as are
// the trailing arguments of each call.
2743 PPCTargetLowering::LowerFormalArguments(
SDValue Chain,
2752 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins,
2755 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins,
2758 return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins,
2764 PPCTargetLowering::LowerFormalArguments_32SVR4(
2809 unsigned PtrByteSize = 4;
2818 CCInfo.AllocateStack(LinkageSize, PtrByteSize);
2820 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
2822 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
2835 RC = &PPC::GPRCRegClass;
2839 RC = &PPC::VSSRCRegClass;
2841 RC = &PPC::F4RCRegClass;
2845 RC = &PPC::VSFRCRegClass;
2847 RC = &PPC::F8RCRegClass;
2852 RC = &PPC::VRRCRegClass;
2855 RC = Subtarget.
hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
2859 RC = &PPC::VSHRCRegClass;
2862 RC = &PPC::QFRCRegClass;
2865 RC = &PPC::QBRCRegClass;
2872 ValVT ==
MVT::i1 ? MVT::i32 : ValVT);
2890 false,
false,
false, 0));
2902 CCByValInfo.
AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
2904 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
2907 unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
2908 MinReservedArea = std::max(MinReservedArea, LinkageSize);
2925 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
2930 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
2941 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
2946 CCInfo.getNextStackOffset(),
true));
2954 for (
unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
2958 VReg = MF.
addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
2973 for (
unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
2977 VReg = MF.
addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
2990 if (!MemOps.
empty())
3013 PPCTargetLowering::LowerFormalArguments_64SVR4(
3029 "fastcc not supported on varargs functions");
3035 unsigned PtrByteSize = 8;
3039 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3040 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3044 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3047 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
3048 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
3052 const unsigned Num_FPR_Regs = 13;
3054 const unsigned Num_QFPR_Regs = Num_FPR_Regs;
3062 bool HasParameterArea = !isELFv2ABI || isVarArg;
3063 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
3064 unsigned NumBytes = LinkageSize;
3065 unsigned AvailableFPRs = Num_FPR_Regs;
3066 unsigned AvailableVRs = Num_VR_Regs;
3067 for (
unsigned i = 0, e = Ins.
size(); i != e; ++i) {
3068 if (Ins[i].Flags.
isNest())
3072 PtrByteSize, LinkageSize, ParamAreaSize,
3073 NumBytes, AvailableFPRs, AvailableVRs,
3075 HasParameterArea =
true;
3082 unsigned ArgOffset = LinkageSize;
3083 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3084 unsigned &QFPR_idx = FPR_idx;
3087 unsigned CurArgIdx = 0;
3088 for (
unsigned ArgNo = 0, e = Ins.
size(); ArgNo != e; ++ArgNo) {
3090 bool needsLoad =
false;
3091 EVT ObjectVT = Ins[ArgNo].VT;
3092 EVT OrigVT = Ins[ArgNo].ArgVT;
3094 unsigned ArgSize = ObjSize;
3096 if (Ins[ArgNo].isOrigArg()) {
3097 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3098 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3103 unsigned CurArgOffset,
Align;
3104 auto ComputeArgOffset = [&]() {
3107 ArgOffset = ((ArgOffset + Align - 1) / Align) *
Align;
3108 CurArgOffset = ArgOffset;
3115 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3116 GPR_idx =
std::min(GPR_idx, Num_GPR_Regs);
3122 assert(Ins[ArgNo].isOrigArg() &&
"Byval arguments cannot be implicit");
3129 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3151 if (HasParameterArea ||
3152 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
3159 if (ObjSize < PtrByteSize) {
3163 if (!isLittleEndian) {
3169 if (GPR_idx != Num_GPR_Regs) {
3170 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3174 if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
3175 EVT ObjType = (ObjSize == 1 ? MVT::i8 :
3179 ObjType,
false,
false, 0);
3193 ArgOffset += PtrByteSize;
3202 for (
unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3203 if (GPR_idx == Num_GPR_Regs)
3206 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3219 ArgOffset += ArgSize;
3230 unsigned VReg = MF.
addLiveIn(PPC::X11, &PPC::G8RCRegClass);
3233 if (ObjectVT == MVT::i32 || ObjectVT ==
MVT::i1)
3234 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3242 if (GPR_idx != Num_GPR_Regs) {
3243 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3246 if (ObjectVT == MVT::i32 || ObjectVT ==
MVT::i1)
3249 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3255 ArgSize = PtrByteSize;
3266 if (FPR_idx != Num_FPR_Regs) {
3272 ? &PPC::VSSRCRegClass
3273 : &PPC::F4RCRegClass);
3276 ? &PPC::VSFRCRegClass
3277 : &PPC::F8RCRegClass);
3288 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
3292 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
3311 ArgOffset += ArgSize;
3313 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3323 if (!Subtarget.
hasQPX()) {
3327 if (VR_idx != Num_VR_Regs) {
3329 MF.
addLiveIn(VSRH[VR_idx], &PPC::VSHRCRegClass) :
3330 MF.
addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3345 "Invalid QPX parameter type");
3353 if (QFPR_idx != Num_QFPR_Regs) {
3356 case MVT::v4f64: RC = &PPC::QFRCRegClass;
break;
3357 case MVT::v4f32: RC = &PPC::QSRCRegClass;
break;
3358 default: RC = &PPC::QBRCRegClass;
break;
3377 if (ObjSize < ArgSize && !isLittleEndian)
3378 CurArgOffset += ArgSize - ObjSize;
3382 false,
false,
false, 0);
3389 unsigned MinReservedArea;
3390 if (HasParameterArea)
3391 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
3393 MinReservedArea = LinkageSize;
3406 int Depth = ArgOffset;
3415 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
3416 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
3417 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3428 if (!MemOps.
empty())
3435 PPCTargetLowering::LowerFormalArguments_Darwin(
3453 unsigned PtrByteSize = isPPC64 ? 8 : 4;
3455 unsigned ArgOffset = LinkageSize;
3457 unsigned MinReservedArea = ArgOffset;
3461 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3464 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
3465 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
3469 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
3473 const unsigned Num_FPR_Regs = 13;
3476 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
3478 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
3487 unsigned VecArgOffset = ArgOffset;
3488 if (!isVarArg && !isPPC64) {
3489 for (
unsigned ArgNo = 0, e = Ins.
size(); ArgNo != e;
3491 EVT ObjectVT = Ins[ArgNo].VT;
3498 ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3499 VecArgOffset += ArgSize;
3527 VecArgOffset = ((VecArgOffset+15)/16)*16;
3528 VecArgOffset += 12*16;
3535 unsigned nAltivecParamsAtEnd = 0;
3537 unsigned CurArgIdx = 0;
3538 for (
unsigned ArgNo = 0, e = Ins.
size(); ArgNo != e; ++ArgNo) {
3540 bool needsLoad =
false;
3541 EVT ObjectVT = Ins[ArgNo].VT;
3543 unsigned ArgSize = ObjSize;
3545 if (Ins[ArgNo].isOrigArg()) {
3546 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
3547 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
3549 unsigned CurArgOffset = ArgOffset;
3554 if (isVarArg || isPPC64) {
3555 MinReservedArea = ((MinReservedArea+15)/16)*16;
3559 }
else nAltivecParamsAtEnd++;
3569 assert(Ins[ArgNo].isOrigArg() &&
"Byval arguments cannot be implicit");
3573 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3576 if (ObjSize==1 || ObjSize==2) {
3577 CurArgOffset = CurArgOffset + (4 - ObjSize);
3583 if (ObjSize==1 || ObjSize==2) {
3584 if (GPR_idx != Num_GPR_Regs) {
3587 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3589 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3594 ObjType,
false,
false, 0);
3599 ArgOffset += PtrByteSize;
3603 for (
unsigned j = 0; j < ArgSize; j += PtrByteSize) {
3607 if (GPR_idx != Num_GPR_Regs) {
3610 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3612 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3621 ArgOffset += PtrByteSize;
3623 ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
3635 if (GPR_idx != Num_GPR_Regs) {
3636 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3645 ArgSize = PtrByteSize;
3648 ArgOffset += PtrByteSize;
3653 if (GPR_idx != Num_GPR_Regs) {
3654 unsigned VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3657 if (ObjectVT == MVT::i32 || ObjectVT ==
MVT::i1)
3660 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
3665 ArgSize = PtrByteSize;
3675 if (GPR_idx != Num_GPR_Regs) {
3677 if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
3680 if (FPR_idx != Num_FPR_Regs) {
3695 ArgOffset += isPPC64 ? 8 : ObjSize;
3703 if (VR_idx != Num_VR_Regs) {
3704 unsigned VReg = MF.
addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
3707 while ((ArgOffset % 16) != 0) {
3708 ArgOffset += PtrByteSize;
3709 if (GPR_idx != Num_GPR_Regs)
3713 GPR_idx =
std::min(GPR_idx+4, Num_GPR_Regs);
3717 if (!isVarArg && !isPPC64) {
3719 CurArgOffset = VecArgOffset;
3723 ArgOffset = ((ArgOffset+15)/16)*16;
3724 CurArgOffset = ArgOffset;
3736 CurArgOffset + (ArgSize - ObjSize),
3740 false,
false,
false, 0);
3747 if (nAltivecParamsAtEnd) {
3748 MinReservedArea = ((MinReservedArea+15)/16)*16;
3749 MinReservedArea += 16*nAltivecParamsAtEnd;
3753 MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);
3766 int Depth = ArgOffset;
3776 for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
3780 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
3782 VReg = MF.
addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
3794 if (!MemOps.
empty())
// NOTE(review): fragment of the tail-call stack-pointer-adjustment
// computation: non-tail calls need no adjustment; otherwise the delta is
// the caller's reserved area minus the callee's parameter area (negative
// when the callee needs more space). The function-info bookkeeping that
// follows is only partially present.
3803 unsigned ParamSize) {
3805 if (!isTailCall)
return 0;
3809 int SPDiff = (
int)CallerMinReservedArea - (
int)ParamSize;
3811 if (SPDiff < FI->getTailCallSPDelta())
// NOTE(review): fragments of tail-call eligibility plus a branch-absolute
// address check. Visible rules: any byval incoming argument disqualifies
// the tail call; hidden/protected visibility of the callee matters on one
// path; and an absolute call target must be 4-byte aligned and fit the
// sign-extended 26-bit branch displacement. Surrounding conditions are
// missing from this excerpt.
3821 PPCTargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee,
3837 for (
unsigned i = 0; i != Ins.
size(); i++) {
3839 if (Flags.
isByVal())
return false;
3849 return G->getGlobal()->hasHiddenVisibility()
3850 ||
G->getGlobal()->hasProtectedVisibility();
3860 if (!C)
return nullptr;
3863 if ((Addr & 3) != 0 ||
3864 SignExtend32<26>(Addr) != Addr)
// NOTE(review): fragments of several small tail-call helpers:
// - TailCallArgumentInfo: a record pairing an argument value with its
//   frame-index operand and slot number (members mostly missing here);
// - a loop storing queued tail-call arguments to their stack slots;
// - saving the old return address / frame pointer into relocated slots
//   (slot size 8 on ppc64, 4 on ppc32);
// - computing an argument's destination offset as ArgOffset + SPDiff;
// - reloading LR/FP via getReturnAddrFrameIndex/getFramePointerFrameIndex.
// Most surrounding code is missing from this excerpt; treat none of these
// as complete definitions.
3874 struct TailCallArgumentInfo {
3879 TailCallArgumentInfo() : FrameIdx(0) {}
3891 for (
unsigned i = 0, e = TailCallArgs.
size(); i != e; ++i) {
3892 SDValue Arg = TailCallArgs[i].Arg;
3893 SDValue FIN = TailCallArgs[i].FrameIdxOp;
3894 int FI = TailCallArgs[i].FrameIdx;
3915 int SlotSize = isPPC64 ? 8 : 4;
3920 NewRetAddrLoc,
true);
3923 Chain = DAG.
getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
3934 Chain = DAG.
getStore(Chain, dl, OldFP, NewFramePtrIdx,
3946 SDValue Arg,
int SPDiff,
unsigned ArgOffset,
3948 int Offset = ArgOffset + SPDiff;
3953 TailCallArgumentInfo Info;
3955 Info.FrameIdxOp = FIN;
3973 LROpOut = getReturnAddrFrameIndex(DAG);
3975 false,
false,
false, 0);
3981 FPOpOut = getFramePointerFrameIndex(DAG);
3983 false,
false,
false, 0);
4011 unsigned ArgOffset,
bool isPPC64,
bool isTailCall,
4035 SDLoc dl,
bool isPPC64,
int SPDiff,
unsigned NumBytes,
4047 if (!MemOpChains2.
empty())
4052 isPPC64, isDarwinABI, dl);
4068 return G->getGlobal()->getType()->getElementType()->isFunctionTy();
4077 bool isTailCall,
bool IsPatchPoint,
bool hasNest,
4082 bool isPPC64 = Subtarget.
isPPC64();
4092 bool needIndirectCall =
true;
4093 if (!isSVR4ABI || !isPPC64)
4097 needIndirectCall =
false;
4104 unsigned OpFlags = 0;
4123 needIndirectCall =
false;
4127 unsigned char OpFlags = 0;
4142 needIndirectCall =
false;
4152 needIndirectCall =
false;
4155 if (needIndirectCall) {
4158 SDValue MTCTROps[] = {Chain, Callee, InFlag};
4160 if (isSVR4ABI && isPPC64 && !isELFv2ABI) {
4198 false,
false, LoadsInv, 8);
4204 MPI.getWithOffset(16),
false,
false,
4210 MPI.getWithOffset(8),
false,
false,
4229 MTCTROps[0] = Chain;
4230 MTCTROps[1] = LoadFuncPtr;
4231 MTCTROps[2] = InFlag;
4245 if (isSVR4ABI && isPPC64 && !isELFv2ABI && !hasNest)
4263 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4265 RegsToPass[i].second.getValueType()));
4269 if (isSVR4ABI && isPPC64 && !IsPatchPoint) {
4281 return G->getGlobal()->isStrongDefinitionForLinker();
4286 PPCTargetLowering::LowerCallResult(
SDValue Chain,
SDValue InFlag,
4298 for (
unsigned i = 0, e = RVLocs.
size(); i != e; ++i) {
4300 assert(VA.
isRegLoc() &&
"Can only return in registers!");
4333 bool isTailCall,
bool isVarArg,
bool IsPatchPoint,
4339 int SPDiff,
unsigned NumBytes,
4344 std::vector<EVT> NodeTys;
4346 unsigned CallOpc =
PrepareCall(DAG, Callee, InFlag, Chain, CallSeqStart, dl,
4347 SPDiff, isTailCall, IsPatchPoint, hasNest,
4348 RegsToPass, Ops, NodeTys, CS, Subtarget);
4357 int BytesCalleePops =
4363 const uint32_t *Mask =
4365 assert(Mask &&
"Missing call preserved mask for calling convention");
4374 cast<RegisterSDNode>(Callee)->
getReg() == PPC::CTR) ||
4377 isa<ConstantSDNode>(Callee)) &&
4378 "Expecting an global address, external symbol, absolute value or register");
4423 Chain = DAG.
getNode(CallOpc, dl, NodeTys, Ops);
4432 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
4433 Ins, dl, DAG, InVals);
4453 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
4458 "site marked musttail");
4462 return LowerCall_64SVR4(Chain, Callee, CallConv, isVarArg,
4463 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4464 dl, DAG, InVals, CS);
4466 return LowerCall_32SVR4(Chain, Callee, CallConv, isVarArg,
4467 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4468 dl, DAG, InVals, CS);
4471 return LowerCall_Darwin(Chain, Callee, CallConv, isVarArg,
4472 isTailCall, IsPatchPoint, Outs, OutVals, Ins,
4473 dl, DAG, InVals, CS);
4477 PPCTargetLowering::LowerCall_32SVR4(
SDValue Chain,
SDValue Callee,
4479 bool isTailCall,
bool IsPatchPoint,
4492 unsigned PtrByteSize = 4;
4522 unsigned NumArgs = Outs.
size();
4524 for (
unsigned i = 0; i != NumArgs; ++i) {
4525 MVT ArgVT = Outs[i].VT;
4529 if (Outs[i].IsFixed) {
4539 errs() <<
"Call operand #" << i <<
" has unhandled type "
4547 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
4556 CCByValInfo.
AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
4558 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
4563 unsigned NumBytes = CCByValInfo.getNextStackOffset();
4578 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp,
false,
4590 bool seenFloatArg =
false;
4592 for (
unsigned i = 0, j = 0, e = ArgLocs.
size();
4604 assert((j < ByValArgLocs.
size()) &&
"Index out of bounds!");
4623 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4626 DAG.ReplaceAllUsesWith(CallSeqStart.
getNode(),
4627 NewCallSeqStart.getNode());
4628 Chain = CallSeqStart = NewCallSeqStart;
4649 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4653 MemOpChains.
push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
4664 if (!MemOpChains.
empty())
4670 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++i) {
4671 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4672 RegsToPass[i].second, InFlag);
4680 SDValue Ops[] = { Chain, InFlag };
4689 PrepareTailCall(DAG, InFlag, Chain, dl,
false, SPDiff, NumBytes, LROp, FPOp,
4690 false, TailCallArguments);
4692 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
4694 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
4695 NumBytes, Ins, InVals, CS);
4701 PPCTargetLowering::createMemcpyOutsideCallSeq(
SDValue Arg,
SDValue PtrOff,
4710 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
4713 DAG.ReplaceAllUsesWith(CallSeqStart.
getNode(),
4714 NewCallSeqStart.getNode());
4715 return NewCallSeqStart;
4719 PPCTargetLowering::LowerCall_64SVR4(
SDValue Chain,
SDValue Callee,
4721 bool isTailCall,
bool IsPatchPoint,
4731 unsigned NumOps = Outs.
size();
4732 bool hasNest =
false;
4735 unsigned PtrByteSize = 8;
4749 "fastcc not supported on varargs functions");
4756 unsigned NumBytes = LinkageSize;
4757 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4758 unsigned &QFPR_idx = FPR_idx;
4761 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4762 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4766 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4769 PPC::VSH2, PPC::VSH3, PPC::VSH4, PPC::VSH5, PPC::VSH6, PPC::VSH7, PPC::VSH8,
4770 PPC::VSH9, PPC::VSH10, PPC::VSH11, PPC::VSH12, PPC::VSH13
4774 const unsigned NumFPRs = 13;
4776 const unsigned NumQFPRs = NumFPRs;
4780 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
4783 for (
unsigned i = 0; i != NumOps; ++i) {
4785 EVT ArgVT = Outs[i].VT;
4786 EVT OrigVT = Outs[i].ArgVT;
4800 if (++NumGPRsUsed <= NumGPRs)
4809 if (++NumVRsUsed <= NumVRs)
4815 if (Subtarget.
hasQPX()) {
4816 if (++NumFPRsUsed <= NumFPRs)
4819 if (++NumVRsUsed <= NumVRs)
4827 if (++NumFPRsUsed <= NumFPRs)
4836 NumBytes = ((NumBytes + Align - 1) / Align) *
Align;
4840 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4843 unsigned NumBytesActuallyUsed = NumBytes;
4851 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
4876 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp,
true,
4888 unsigned ArgOffset = LinkageSize;
4894 for (
unsigned i = 0; i != NumOps; ++i) {
4897 EVT ArgVT = Outs[i].VT;
4898 EVT OrigVT = Outs[i].ArgVT;
4907 auto ComputePtrOff = [&]() {
4911 ArgOffset = ((ArgOffset + Align - 1) / Align) *
Align;
4922 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4923 GPR_idx =
std::min(GPR_idx, NumGPRs);
4953 if (Size==1 || Size==2 || Size==4) {
4955 if (GPR_idx != NumGPRs) {
4958 false,
false,
false, 0);
4960 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
4962 ArgOffset += PtrByteSize;
4967 if (GPR_idx == NumGPRs && Size < 8) {
4969 if (!isLittleEndian) {
4974 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
4977 ArgOffset += PtrByteSize;
4994 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
4999 if (Size < 8 && GPR_idx != NumGPRs) {
5009 if (!isLittleEndian) {
5013 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5020 false,
false,
false, 0);
5022 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5025 ArgOffset += PtrByteSize;
5031 for (
unsigned j=0; j<Size; j+=PtrByteSize) {
5034 if (GPR_idx != NumGPRs) {
5037 false,
false,
false, 0);
5039 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5040 ArgOffset += PtrByteSize;
5042 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5056 RegsToPass.
push_back(std::make_pair(PPC::X11, Arg));
5064 if (GPR_idx != NumGPRs) {
5065 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Arg));
5071 true, isTailCall,
false, MemOpChains,
5072 TailCallArguments, dl);
5074 ArgOffset += PtrByteSize;
5077 ArgOffset += PtrByteSize;
5090 bool NeedGPROrStack = isVarArg || FPR_idx == NumFPRs;
5091 bool NeededLoad =
false;
5094 if (FPR_idx != NumFPRs)
5095 RegsToPass.
push_back(std::make_pair(
FPR[FPR_idx++], Arg));
5098 if (!NeedGPROrStack)
5120 }
else if (ArgOffset % PtrByteSize != 0) {
5124 if (!isLittleEndian)
5132 if (!isLittleEndian)
5142 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
5156 true, isTailCall,
false, MemOpChains,
5157 TailCallArguments, dl);
5168 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5179 if (!Subtarget.
hasQPX()) {
5194 if (VR_idx != NumVRs) {
5197 false,
false,
false, 0);
5202 VSRH[VR_idx] : VR[VR_idx];
5205 RegsToPass.
push_back(std::make_pair(VReg, Load));
5208 for (
unsigned i=0; i<16; i+=PtrByteSize) {
5209 if (GPR_idx == NumGPRs)
5214 false,
false,
false, 0);
5216 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5222 if (VR_idx != NumVRs) {
5225 VSRH[VR_idx] : VR[VR_idx];
5228 RegsToPass.
push_back(std::make_pair(VReg, Arg));
5234 true, isTailCall,
true, MemOpChains,
5235 TailCallArguments, dl);
5246 "Invalid QPX parameter type");
5258 if (QFPR_idx != NumQFPRs) {
5261 false,
false,
false, 0);
5263 RegsToPass.
push_back(std::make_pair(
QFPR[QFPR_idx++], Load));
5265 ArgOffset += (IsF32 ? 16 : 32);
5266 for (
unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
5267 if (GPR_idx == NumGPRs)
5272 false,
false,
false, 0);
5274 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5280 if (QFPR_idx != NumQFPRs) {
5281 RegsToPass.
push_back(std::make_pair(
QFPR[QFPR_idx++], Arg));
5287 true, isTailCall,
true, MemOpChains,
5288 TailCallArguments, dl);
5290 ArgOffset += (IsF32 ? 16 : 32);
5294 ArgOffset += (IsF32 ? 16 : 32);
5300 assert(NumBytesActuallyUsed == ArgOffset);
5301 (void)NumBytesActuallyUsed;
5303 if (!MemOpChains.
empty())
5309 if (!isTailCall && !IsPatchPoint &&
5311 !isa<ExternalSymbolSDNode>(Callee)) {
5325 if (isELFv2ABI && !IsPatchPoint)
5326 RegsToPass.
push_back(std::make_pair((
unsigned)PPC::X12, Callee));
5332 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++i) {
5333 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
5334 RegsToPass[i].second, InFlag);
5340 FPOp,
true, TailCallArguments);
5342 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
5343 hasNest, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5344 Callee, SPDiff, NumBytes, Ins, InVals, CS);
5348 PPCTargetLowering::LowerCall_Darwin(
SDValue Chain,
SDValue Callee,
5350 bool isTailCall,
bool IsPatchPoint,
5358 unsigned NumOps = Outs.
size();
5362 unsigned PtrByteSize = isPPC64 ? 8 : 4;
5379 unsigned NumBytes = LinkageSize;
5387 unsigned nAltivecParamsAtEnd = 0;
5388 for (
unsigned i = 0; i != NumOps; ++i) {
5390 EVT ArgVT = Outs[i].VT;
5395 if (!isVarArg && !isPPC64) {
5398 nAltivecParamsAtEnd++;
5402 NumBytes = ((NumBytes+15)/16)*16;
5408 if (nAltivecParamsAtEnd) {
5409 NumBytes = ((NumBytes+15)/16)*16;
5410 NumBytes += 16*nAltivecParamsAtEnd;
5418 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5443 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp,
true,
5459 unsigned ArgOffset = LinkageSize;
5460 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5464 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
5467 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5468 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5472 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5475 const unsigned NumFPRs = 13;
5478 const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
5484 for (
unsigned i = 0; i != NumOps; ++i) {
5510 if (Size==1 || Size==2) {
5512 if (GPR_idx != NumGPRs) {
5515 false,
false,
false, 0);
5517 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5519 ArgOffset += PtrByteSize;
5524 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
5527 ArgOffset += PtrByteSize;
5534 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
5541 for (
unsigned j=0; j<Size; j+=PtrByteSize) {
5544 if (GPR_idx != NumGPRs) {
5547 false,
false,
false, 0);
5549 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5550 ArgOffset += PtrByteSize;
5552 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
5564 if (GPR_idx != NumGPRs) {
5568 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Arg));
5571 isPPC64, isTailCall,
false, MemOpChains,
5572 TailCallArguments, dl);
5574 ArgOffset += PtrByteSize;
5578 if (FPR_idx != NumFPRs) {
5579 RegsToPass.
push_back(std::make_pair(
FPR[FPR_idx++], Arg));
5587 if (GPR_idx != NumGPRs) {
5592 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5599 false,
false,
false, 0);
5601 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5607 if (GPR_idx != NumGPRs)
5615 isPPC64, isTailCall,
false, MemOpChains,
5616 TailCallArguments, dl);
5632 while (ArgOffset % 16 !=0) {
5633 ArgOffset += PtrByteSize;
5634 if (GPR_idx != NumGPRs)
5644 if (VR_idx != NumVRs) {
5647 false,
false,
false, 0);
5649 RegsToPass.
push_back(std::make_pair(VR[VR_idx++], Load));
5652 for (
unsigned i=0; i<16; i+=PtrByteSize) {
5653 if (GPR_idx == NumGPRs)
5658 false,
false,
false, 0);
5660 RegsToPass.
push_back(std::make_pair(GPR[GPR_idx++], Load));
5667 if (VR_idx != NumVRs) {
5669 RegsToPass.
push_back(std::make_pair(VR[VR_idx++], Arg));
5670 }
else if (nAltivecParamsAtEnd==0) {
5673 isPPC64, isTailCall,
true, MemOpChains,
5674 TailCallArguments, dl);
5685 if (!isVarArg && nAltivecParamsAtEnd > NumVRs) {
5688 ArgOffset = ((ArgOffset+15)/16)*16;
5690 for (
unsigned i = 0; i != NumOps; ++i) {
5692 EVT ArgType = Outs[i].VT;
5699 isPPC64, isTailCall,
true, MemOpChains,
5700 TailCallArguments, dl);
5707 if (!MemOpChains.
empty())
5715 !isa<ExternalSymbolSDNode>(Callee) &&
5717 RegsToPass.
push_back(std::make_pair((
unsigned)(isPPC64 ? PPC::X12 :
5718 PPC::R12), Callee));
5723 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++i) {
5724 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
5725 RegsToPass[i].second, InFlag);
5730 PrepareTailCall(DAG, InFlag, Chain, dl, isPPC64, SPDiff, NumBytes, LROp,
5731 FPOp,
true, TailCallArguments);
5733 return FinishCall(CallConv, dl, isTailCall, isVarArg, IsPatchPoint,
5735 RegsToPass, InFlag, Chain, CallSeqStart, Callee, SPDiff,
5736 NumBytes, Ins, InVals, CS);
5745 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
5746 return CCInfo.CheckReturn(Outs, RetCC_PPC);
5750 PPCTargetLowering::LowerReturn(
SDValue Chain,
5765 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
5767 assert(VA.
isRegLoc() &&
"Can only return in registers!");
5794 RetOps.push_back(Flag);
5808 bool isPPC64 = Subtarget.
isPPC64();
5809 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
5819 false,
false,
false, 0);
5832 PPCTargetLowering::getReturnAddrFrameIndex(
SelectionDAG & DAG)
const {
5834 bool isPPC64 = Subtarget.
isPPC64();
5855 PPCTargetLowering::getFramePointerFrameIndex(
SelectionDAG & DAG)
const {
5857 bool isPPC64 = Subtarget.
isPPC64();
5891 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
5893 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
5915 return LowerVectorLoad(Op, DAG);
5918 "Custom lowering only for i1 loads");
5931 BasePtr, MVT::i8, MMO);
5940 return LowerVectorStore(Op, DAG);
5943 "Custom lowering only for i1 stores");
5957 return DAG.
getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
5963 "Custom lowering only for i1 results");
6070 void PPCTargetLowering::LowerFP_TO_INTForReuse(
SDValue Op, ReuseLoadInfo &RLI,
6075 if (Src.getValueType() ==
MVT::f32)
6090 "i64 FP_TO_UINT is supported only with FPCVT");
6101 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
6115 MPI,
false,
false, 0);
6122 MPI = MPI.getWithOffset(4);
6139 if (Src.getValueType() ==
MVT::f32)
6155 "i64 FP_TO_UINT is supported only with FPCVT");
6168 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
6171 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6174 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
6186 bool PPCTargetLowering::canReuseLoadAddress(
SDValue Op,
EVT MemVT,
6197 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
6211 "Non-pre-inc AM on PPC?");
6230 void PPCTargetLowering::spliceIntoChain(
SDValue ResChain,
6236 SDLoc dl(NewResChain);
6241 "A new TF really is required here");
6255 "Invalid floating point type as target of conversion");
6257 "Int to FP conversions with direct moves require FPCVT");
6295 FPHalfs, FPHalfs, FPHalfs, FPHalfs);
6297 Value = DAG.
getNode(
ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
6318 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
6321 "UINT_TO_FP is supported only with FPCVT");
6386 if (canReuseLoadAddress(SINT,
MVT::i64, RLI, DAG)) {
6387 Bits = DAG.
getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
false,
6388 false, RLI.IsInvariant, RLI.Alignment, RLI.AAInfo,
6390 spliceIntoChain(RLI.ResChain, Bits.
getValue(1), DAG);
6392 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG,
ISD::SEXTLOAD)) {
6395 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6396 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6400 spliceIntoChain(RLI.ResChain, Bits.
getValue(1), DAG);
6402 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG,
ISD::ZEXTLOAD)) {
6405 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6406 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6410 spliceIntoChain(RLI.ResChain, Bits.
getValue(1), DAG);
6427 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
6428 "Expected an i32 store");
6437 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6438 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6455 "Unhandled INT_TO_FP type in custom expander!");
6477 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
6478 "Expected an i32 store");
6488 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
6489 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
6495 spliceIntoChain(RLI.ResChain, Ld.
getValue(1), DAG);
6498 "i32->FP without LFIWAX supported only on PPC64");
6512 Ld = DAG.
getLoad(MVT::f64, dl, Store, FIdx,
6514 false,
false,
false, 0);
6568 false,
false,
false, 0);
6614 SDValue OutOps[] = { OutLo, OutHi };
6643 SDValue OutOps[] = { OutLo, OutHi };
6672 SDValue OutOps[] = { OutLo, OutHi };
6684 assert(Val >= -16 && Val <= 15 &&
"vsplti is out of range!");
6686 static const MVT VTys[] = {
6696 EVT CanonicalVT = VTys[SplatSize-1];
6733 DAG.
getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
6746 for (
unsigned i = 0; i != 16; ++i)
6761 assert(BVN &&
"Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
6774 "BUILD_VECTOR for v4i1 does not have 4 operands");
6776 bool IsConst =
true;
6777 for (
unsigned i = 0; i < 4; ++i) {
6779 if (!isa<ConstantSDNode>(BVN->
getOperand(i))) {
6792 for (
unsigned i = 0; i < 4; ++i) {
6795 else if (cast<ConstantSDNode>(BVN->
getOperand(i))->
6796 getConstantIntValue()->isZero())
6821 for (
unsigned i = 0; i < 4; ++i) {
6824 unsigned Offset = 4*i;
6829 if (StoreSize > 4) {
6847 if (!Stores.
empty())
6870 DAG.
getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
6875 FPZeros, FPZeros, FPZeros, FPZeros);
6885 APInt APSplatBits, APSplatUndef;
6886 unsigned SplatBitSize;
6895 unsigned SplatSize = SplatBitSize / 8;
6900 if (SplatBits == 0) {
6911 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
6913 if (SextVal >= -16 && SextVal <= 15)
6926 if (SextVal >= -32 && SextVal <= 31) {
6944 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
6958 static const signed char SplatCsts[] = {
6959 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
6960 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
6966 int i = SplatCsts[idx];
6970 unsigned TypeShiftAmt = i & (SplatBitSize-1);
6973 if (SextVal == (
int)((
unsigned)i << TypeShiftAmt)) {
6975 static const unsigned IIDs[] = {
6976 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
6977 Intrinsic::ppc_altivec_vslw
6984 if (SextVal == (
int)((
unsigned)i >> TypeShiftAmt)) {
6986 static const unsigned IIDs[] = {
6987 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
6988 Intrinsic::ppc_altivec_vsrw
6995 if (SextVal == (
int)((
unsigned)i >> TypeShiftAmt)) {
6997 static const unsigned IIDs[] = {
6998 Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
6999 Intrinsic::ppc_altivec_vsraw
7006 if (SextVal == (
int)(((
unsigned)i << TypeShiftAmt) |
7007 ((
unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
7009 static const unsigned IIDs[] = {
7010 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
7011 Intrinsic::ppc_altivec_vrlw
7018 if (SextVal == (
int)(((
unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
7024 if (SextVal == (
int)(((
unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
7030 if (SextVal == (
int)(((
unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
7045 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7046 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7047 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
7062 if (OpNum == OP_COPY) {
7063 if (LHSID == (1*9+2)*9+3)
return LHS;
7064 assert(LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
7076 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
7077 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
7078 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
7079 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
7082 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
7083 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
7084 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
7085 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
7088 for (
unsigned i = 0; i != 16; ++i)
7089 ShufIdxs[i] = (i&3)+0;
7092 for (
unsigned i = 0; i != 16; ++i)
7093 ShufIdxs[i] = (i&3)+4;
7096 for (
unsigned i = 0; i != 16; ++i)
7097 ShufIdxs[i] = (i&3)+8;
7100 for (
unsigned i = 0; i != 16; ++i)
7101 ShufIdxs[i] = (i&3)+12;
7130 if (Subtarget.
hasQPX()) {
7131 if (VT.getVectorNumElements() != 4)
7137 if (AlignIdx != -1) {
7142 if (SplatIdx >= 4) {
7158 for (
unsigned i = 0; i < 4; ++i) {
7160 unsigned mm = m >= 0 ? (
unsigned) m : i;
7161 idx |= mm << (3-i)*3;
7195 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
7214 unsigned PFIndexes[4];
7215 bool isFourElementShuffle =
true;
7216 for (
unsigned i = 0; i != 4 && isFourElementShuffle; ++i) {
7218 for (
unsigned j = 0; j != 4; ++j) {
7219 if (PermMask[i*4+j] < 0)
7222 unsigned ByteSource = PermMask[i*4+j];
7223 if ((ByteSource & 3) != j) {
7224 isFourElementShuffle =
false;
7229 EltNo = ByteSource/4;
7230 }
else if (EltNo != ByteSource/4) {
7231 isFourElementShuffle =
false;
7235 PFIndexes[i] = EltNo;
7243 if (isFourElementShuffle && !isLittleEndian) {
7245 unsigned PFTableIndex =
7246 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
7249 unsigned Cost = (PFEntry >> 30);
7281 for (
unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7282 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
7284 for (
unsigned j = 0; j != BytesPerElement; ++j)
7308 unsigned IntrinsicID =
7309 cast<ConstantSDNode>(Intrin.
getOperand(0))->getZExtValue();
7312 switch (IntrinsicID) {
7313 default:
return false;
7315 case Intrinsic::ppc_altivec_vcmpbfp_p: CompareOpc = 966; isDot = 1;
break;
7316 case Intrinsic::ppc_altivec_vcmpeqfp_p: CompareOpc = 198; isDot = 1;
break;
7317 case Intrinsic::ppc_altivec_vcmpequb_p: CompareOpc = 6; isDot = 1;
break;
7318 case Intrinsic::ppc_altivec_vcmpequh_p: CompareOpc = 70; isDot = 1;
break;
7319 case Intrinsic::ppc_altivec_vcmpequw_p: CompareOpc = 134; isDot = 1;
break;
7320 case Intrinsic::ppc_altivec_vcmpequd_p:
7329 case Intrinsic::ppc_altivec_vcmpgefp_p: CompareOpc = 454; isDot = 1;
break;
7330 case Intrinsic::ppc_altivec_vcmpgtfp_p: CompareOpc = 710; isDot = 1;
break;
7331 case Intrinsic::ppc_altivec_vcmpgtsb_p: CompareOpc = 774; isDot = 1;
break;
7332 case Intrinsic::ppc_altivec_vcmpgtsh_p: CompareOpc = 838; isDot = 1;
break;
7333 case Intrinsic::ppc_altivec_vcmpgtsw_p: CompareOpc = 902; isDot = 1;
break;
7334 case Intrinsic::ppc_altivec_vcmpgtsd_p:
7343 case Intrinsic::ppc_altivec_vcmpgtub_p: CompareOpc = 518; isDot = 1;
break;
7344 case Intrinsic::ppc_altivec_vcmpgtuh_p: CompareOpc = 582; isDot = 1;
break;
7345 case Intrinsic::ppc_altivec_vcmpgtuw_p: CompareOpc = 646; isDot = 1;
break;
7346 case Intrinsic::ppc_altivec_vcmpgtud_p:
7357 case Intrinsic::ppc_altivec_vcmpbfp: CompareOpc = 966; isDot = 0;
break;
7358 case Intrinsic::ppc_altivec_vcmpeqfp: CompareOpc = 198; isDot = 0;
break;
7359 case Intrinsic::ppc_altivec_vcmpequb: CompareOpc = 6; isDot = 0;
break;
7360 case Intrinsic::ppc_altivec_vcmpequh: CompareOpc = 70; isDot = 0;
break;
7361 case Intrinsic::ppc_altivec_vcmpequw: CompareOpc = 134; isDot = 0;
break;
7362 case Intrinsic::ppc_altivec_vcmpequd:
7371 case Intrinsic::ppc_altivec_vcmpgefp: CompareOpc = 454; isDot = 0;
break;
7372 case Intrinsic::ppc_altivec_vcmpgtfp: CompareOpc = 710; isDot = 0;
break;
7373 case Intrinsic::ppc_altivec_vcmpgtsb: CompareOpc = 774; isDot = 0;
break;
7374 case Intrinsic::ppc_altivec_vcmpgtsh: CompareOpc = 838; isDot = 0;
break;
7375 case Intrinsic::ppc_altivec_vcmpgtsw: CompareOpc = 902; isDot = 0;
break;
7376 case Intrinsic::ppc_altivec_vcmpgtsd:
7385 case Intrinsic::ppc_altivec_vcmpgtub: CompareOpc = 518; isDot = 0;
break;
7386 case Intrinsic::ppc_altivec_vcmpgtuh: CompareOpc = 582; isDot = 0;
break;
7387 case Intrinsic::ppc_altivec_vcmpgtuw: CompareOpc = 646; isDot = 0;
break;
7388 case Intrinsic::ppc_altivec_vcmpgtud:
7439 switch (cast<ConstantSDNode>(Op.
getOperand(1))->getZExtValue()) {
7442 BitNo = 0; InvertBit =
false;
7445 BitNo = 0; InvertBit =
true;
7448 BitNo = 2; InvertBit =
false;
7451 BitNo = 2; InvertBit =
true;
7508 false,
false,
false, 0);
7517 "Unknown extract_vector_elt type");
7533 FPHalfs, FPHalfs, FPHalfs, FPHalfs);
7535 Value = DAG.
getNode(
ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7539 DAG.
getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
7563 unsigned Offset = 4*cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
7569 false,
false,
false, 0);
7599 for (
unsigned Idx = 0; Idx < 4; ++Idx) {
7601 if (ScalarVT != ScalarMemVT)
7611 DAG.
getLoad(ScalarVT, dl, LoadChain, BasePtr,
7619 "Unknown addressing mode on vector load");
7629 BasePtr.getValueType()));
7637 SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
7641 SDValue RetOps[] = { Value, TF };
7646 assert(LN->
isUnindexed() &&
"Indexed v4i1 loads are not supported");
7652 for (
unsigned i = 0; i < 4; ++i) {
7654 Idx = DAG.
getNode(
ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
7657 dl, MVT::i32, LoadChain, Idx,
7663 VectElmtChains.push_back(VectElmts[i].getValue(1));
7669 SDValue RVals[] = { Value, LoadChain };
7696 for (
unsigned Idx = 0; Idx < 4; ++Idx) {
7701 if (ScalarVT != ScalarMemVT)
7709 DAG.
getStore(StoreChain, dl, Ex, BasePtr,
7716 "Unknown addressing mode on vector store");
7730 SDValue RetOps[] = { TF, Stores[0].getValue(1) };
7737 assert(SN->
isUnindexed() &&
"Indexed v4i1 stores are not supported");
7749 FPHalfs, FPHalfs, FPHalfs, FPHalfs);
7751 Value = DAG.
getNode(
ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
7755 DAG.
getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
7779 for (
unsigned i = 0; i < 4; ++i) {
7780 unsigned Offset = 4*i;
7786 false,
false,
false, 0));
7787 LoadChains.
push_back(Loads[i].getValue(1));
7793 for (
unsigned i = 0; i < 4; ++i) {
7842 LHS, RHS, Zero, DAG, dl);
7862 for (
unsigned i = 0; i != 8; ++i) {
7863 if (isLittleEndian) {
7865 Ops[i*2+1] = 2*i+16;
7868 Ops[i*2+1] = 2*i+1+16;
7894 return LowerVASTART(Op, DAG, Subtarget);
7897 return LowerVAARG(Op, DAG, Subtarget);
7900 return LowerVACOPY(Op, DAG, Subtarget);
7904 return LowerDYNAMIC_STACKALLOC(Op, DAG, Subtarget);
7909 case ISD::LOAD:
return LowerLOAD(Op, DAG);
7932 case ISD::MUL:
return LowerMUL(Op, DAG);
7949 llvm_unreachable(
"Do not know how to custom type legalize this operation!");
7960 if (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue() !=
7961 Intrinsic::ppc_is_decremented_ctr_nonzero)
7965 "Unexpected result type for CTR decrement intrinsic");
8034 bool IsLoad)
const {
8044 bool IsLoad)
const {
8056 unsigned AtomicSize,
8057 unsigned BinOpcode)
const {
8061 auto LoadMnemonic = PPC::LDARX;
8062 auto StoreMnemonic = PPC::STDCX;
8063 switch (AtomicSize) {
8067 LoadMnemonic = PPC::LBARX;
8068 StoreMnemonic = PPC::STBCX;
8072 LoadMnemonic = PPC::LHARX;
8073 StoreMnemonic = PPC::STHCX;
8077 LoadMnemonic = PPC::LWARX;
8078 StoreMnemonic = PPC::STWCX;
8081 LoadMnemonic = PPC::LDARX;
8082 StoreMnemonic = PPC::STDCX;
8106 unsigned TmpReg = (!BinOpcode) ? incr :
8108 : &PPC::GPRCRegClass);
8122 BuildMI(BB, dl, TII->
get(LoadMnemonic), dest)
8123 .addReg(ptrA).
addReg(ptrB);
8143 unsigned BinOpcode)
const {
8154 bool is64bit = Subtarget.
isPPC64();
8155 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
8178 : &PPC::GPRCRegClass;
8217 if (ptrA != ZeroReg) {
8219 BuildMI(BB, dl, TII->
get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
8220 .addReg(ptrA).
addReg(ptrB);
8224 BuildMI(BB, dl, TII->
get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
8226 BuildMI(BB, dl, TII->
get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
8227 .addReg(Shift1Reg).
addImm(is8bit ? 24 : 16);
8229 BuildMI(BB, dl, TII->
get(PPC::RLDICR), PtrReg)
8232 BuildMI(BB, dl, TII->
get(PPC::RLWINM), PtrReg)
8234 BuildMI(BB, dl, TII->
get(PPC::SLW), Incr2Reg)
8235 .addReg(incr).
addReg(ShiftReg);
8237 BuildMI(BB, dl, TII->
get(PPC::LI), Mask2Reg).addImm(255);
8239 BuildMI(BB, dl, TII->
get(PPC::LI), Mask3Reg).addImm(0);
8240 BuildMI(BB, dl, TII->
get(PPC::ORI),Mask2Reg).addReg(Mask3Reg).
addImm(65535);
8243 .addReg(Mask2Reg).
addReg(ShiftReg);
8246 BuildMI(BB, dl, TII->
get(PPC::LWARX), TmpDestReg)
8247 .addReg(ZeroReg).
addReg(PtrReg);
8250 .addReg(Incr2Reg).
addReg(TmpDestReg);
8251 BuildMI(BB, dl, TII->
get(is64bit ? PPC::ANDC8 : PPC::ANDC), Tmp2Reg)
8252 .addReg(TmpDestReg).
addReg(MaskReg);
8254 .addReg(TmpReg).
addReg(MaskReg);
8256 .addReg(Tmp3Reg).
addReg(Tmp2Reg);
8267 BuildMI(*BB, BB->
begin(), dl, TII->
get(PPC::SRW), dest).addReg(TmpDestReg)
8291 assert(RC->
hasType(MVT::i32) &&
"Invalid destination!");
8296 assert((PVT ==
MVT::i64 || PVT == MVT::i32) &&
8297 "Invalid Pointer Size!");
8350 MIB =
BuildMI(*thisMBB, MI, DL, TII->
get(PPC::STD))
8361 BaseReg = Subtarget.
isPPC64() ? PPC::X1 : PPC::R1;
8363 BaseReg = Subtarget.
isPPC64() ? PPC::BP8 : PPC::BP;
8365 MIB =
BuildMI(*thisMBB, MI, DL,
8366 TII->
get(Subtarget.
isPPC64() ? PPC::STD : PPC::STW))
8373 MIB =
BuildMI(*thisMBB, MI, DL, TII->
get(PPC::BCLalways)).addMBB(mainMBB);
8377 BuildMI(*thisMBB, MI, DL, TII->
get(PPC::LI), restoreDstReg).addImm(1);
8379 MIB =
BuildMI(*thisMBB, MI, DL, TII->
get(PPC::EH_SjLj_Setup))
8381 MIB =
BuildMI(*thisMBB, MI, DL, TII->
get(PPC::B)).addMBB(sinkMBB);
8390 TII->
get(Subtarget.
isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
8394 MIB =
BuildMI(mainMBB, DL, TII->
get(PPC::STD))
8399 MIB =
BuildMI(mainMBB, DL, TII->
get(PPC::STW))
8407 BuildMI(mainMBB, DL, TII->
get(PPC::LI), mainDstReg).addImm(0);
8413 .addReg(mainDstReg).
addMBB(mainMBB)
8434 assert((PVT ==
MVT::i64 || PVT == MVT::i32) &&
8435 "Invalid Pointer Size!");
8438 (PVT ==
MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
8441 unsigned FP = (PVT ==
MVT::i64) ? PPC::X31 : PPC::R31;
8442 unsigned SP = (PVT ==
MVT::i64) ? PPC::X1 : PPC::R1;
8468 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), FP)
8477 .addImm(LabelOffset)
8480 MIB =
BuildMI(*MBB, MI, DL, TII->
get(PPC::LWZ), Tmp)
8481 .addImm(LabelOffset)
8548 if (MI->
getOpcode() == PPC::EH_SjLj_SetJmp32 ||
8549 MI->
getOpcode() == PPC::EH_SjLj_SetJmp64) {
8551 }
else if (MI->
getOpcode() == PPC::EH_SjLj_LongJmp32 ||
8552 MI->
getOpcode() == PPC::EH_SjLj_LongJmp64) {
8571 if (MI->
getOpcode() == PPC::SELECT_CC_I4 ||
8582 }
else if (MI->
getOpcode() == PPC::SELECT_CC_I4 ||
8586 MI->
getOpcode() == PPC::SELECT_CC_QFRC ||
8587 MI->
getOpcode() == PPC::SELECT_CC_QSRC ||
8588 MI->
getOpcode() == PPC::SELECT_CC_QBRC ||
8589 MI->
getOpcode() == PPC::SELECT_CC_VRRC ||
8590 MI->
getOpcode() == PPC::SELECT_CC_VSFRC ||
8591 MI->
getOpcode() == PPC::SELECT_CC_VSSRC ||
8592 MI->
getOpcode() == PPC::SELECT_CC_VSRC ||
8630 if (MI->
getOpcode() == PPC::SELECT_I4 ||
8665 }
else if (MI->
getOpcode() == PPC::ReadTB) {
8697 BuildMI(BB, dl, TII->
get(PPC::MFSPR), HiReg).addImm(269);
8698 BuildMI(BB, dl, TII->
get(PPC::MFSPR), LoReg).addImm(268);
8699 BuildMI(BB, dl, TII->
get(PPC::MFSPR), ReadAgainReg).addImm(269);
8704 .addReg(HiReg).
addReg(ReadAgainReg);
8711 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
8713 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
8715 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
8717 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
8720 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
8722 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
8724 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
8726 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
8729 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
8731 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
8733 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
8735 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
8738 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
8740 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
8742 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
8744 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
8747 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
8749 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
8751 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
8753 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
8756 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
8758 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
8760 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
8762 else if (MI->
getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
8765 else if (MI->
getOpcode() == PPC::ATOMIC_SWAP_I8)
8767 else if (MI->
getOpcode() == PPC::ATOMIC_SWAP_I16)
8769 else if (MI->
getOpcode() == PPC::ATOMIC_SWAP_I32)
8771 else if (MI->
getOpcode() == PPC::ATOMIC_SWAP_I64)
8774 else if (MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
8775 MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
8777 MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
8779 MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
8780 bool is64bit = MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
8782 auto LoadMnemonic = PPC::LDARX;
8783 auto StoreMnemonic = PPC::STDCX;
8787 case PPC::ATOMIC_CMP_SWAP_I8:
8788 LoadMnemonic = PPC::LBARX;
8789 StoreMnemonic = PPC::STBCX;
8792 case PPC::ATOMIC_CMP_SWAP_I16:
8793 LoadMnemonic = PPC::LHARX;
8794 StoreMnemonic = PPC::STHCX;
8797 case PPC::ATOMIC_CMP_SWAP_I32:
8798 LoadMnemonic = PPC::LWARX;
8799 StoreMnemonic = PPC::STWCX;
8801 case PPC::ATOMIC_CMP_SWAP_I64:
8802 LoadMnemonic = PPC::LDARX;
8803 StoreMnemonic = PPC::STDCX;
8842 BuildMI(BB, dl, TII->
get(LoadMnemonic), dest)
8843 .addReg(ptrA).
addReg(ptrB);
8844 BuildMI(BB, dl, TII->
get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
8845 .addReg(oldval).
addReg(dest);
8856 BuildMI(BB, dl, TII->
get(PPC::B)).addMBB(exitMBB);
8868 }
else if (MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
8869 MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
8873 bool is64bit = Subtarget.
isPPC64();
8874 bool is8bit = MI->
getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
8897 : &PPC::GPRCRegClass;
8913 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
8946 if (ptrA != ZeroReg) {
8948 BuildMI(BB, dl, TII->
get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
8949 .addReg(ptrA).
addReg(ptrB);
8953 BuildMI(BB, dl, TII->
get(PPC::RLWINM), Shift1Reg).addReg(Ptr1Reg)
8955 BuildMI(BB, dl, TII->
get(is64bit ? PPC::XORI8 : PPC::XORI), ShiftReg)
8956 .addReg(Shift1Reg).
addImm(is8bit ? 24 : 16);
8958 BuildMI(BB, dl, TII->
get(PPC::RLDICR), PtrReg)
8961 BuildMI(BB, dl, TII->
get(PPC::RLWINM), PtrReg)
8963 BuildMI(BB, dl, TII->
get(PPC::SLW), NewVal2Reg)
8964 .addReg(newval).
addReg(ShiftReg);
8965 BuildMI(BB, dl, TII->
get(PPC::SLW), OldVal2Reg)
8966 .addReg(oldval).
addReg(ShiftReg);
8968 BuildMI(BB, dl, TII->
get(PPC::LI), Mask2Reg).addImm(255);
8970 BuildMI(BB, dl, TII->
get(PPC::LI), Mask3Reg).addImm(0);
8971 BuildMI(BB, dl, TII->
get(PPC::ORI), Mask2Reg)
8972 .addReg(Mask3Reg).
addImm(65535);
8975 .addReg(Mask2Reg).
addReg(ShiftReg);
8977 .addReg(NewVal2Reg).
addReg(MaskReg);
8979 .addReg(OldVal2Reg).
addReg(MaskReg);
8982 BuildMI(BB, dl, TII->
get(PPC::LWARX), TmpDestReg)
8983 .addReg(ZeroReg).
addReg(PtrReg);
8985 .addReg(TmpDestReg).
addReg(MaskReg);
8986 BuildMI(BB, dl, TII->
get(PPC::CMPW), PPC::CR0)
8987 .addReg(TmpReg).
addReg(OldVal3Reg);
8995 .addReg(TmpDestReg).
addReg(MaskReg);
8997 .addReg(Tmp2Reg).
addReg(NewVal3Reg);
8998 BuildMI(BB, dl, TII->
get(PPC::STWCX)).addReg(Tmp4Reg)
9002 BuildMI(BB, dl, TII->
get(PPC::B)).addMBB(exitMBB);
9007 BuildMI(BB, dl, TII->
get(PPC::STWCX)).addReg(TmpDestReg)
9016 }
else if (MI->
getOpcode() == PPC::FADDrtz) {
9032 BuildMI(*BB, MI, dl, TII->
get(PPC::MTFSB1)).addImm(31);
9033 BuildMI(*BB, MI, dl, TII->
get(PPC::MTFSB0)).addImm(30);
9042 MI->
getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9043 MI->
getOpcode() == PPC::ANDIo_1_GT_BIT8) {
9044 unsigned Opcode = (MI->
getOpcode() == PPC::ANDIo_1_EQ_BIT8 ||
9045 MI->
getOpcode() == PPC::ANDIo_1_GT_BIT8) ?
9046 PPC::ANDIo8 : PPC::ANDIo;
9048 MI->
getOpcode() == PPC::ANDIo_1_EQ_BIT8);
9052 &PPC::GPRCRegClass :
9053 &PPC::G8RCRegClass);
9060 .addReg(isEQ ? PPC::CR0EQ : PPC::CR0GT);
9061 }
else if (MI->
getOpcode() == PPC::TCHECK_RET) {
9065 BuildMI(*BB, MI, Dl, TII->
get(PPC::TCHECK), CRReg);
9080 std::string RecipOp(Base);
9087 RecipOp =
"vec-" + RecipOp;
9093 DAGCombinerInfo &DCI,
9094 unsigned &RefinementSteps,
9095 bool &UseOneConstNR)
const {
9098 (VT == MVT::f64 && Subtarget.
hasFRSQRTE()) ||
9102 (VT == MVT::v4f64 && Subtarget.
hasQPX())) {
9103 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
9104 std::string RecipOp =
getRecipOp(
"sqrt", VT);
9109 UseOneConstNR =
true;
9116 DAGCombinerInfo &DCI,
9117 unsigned &RefinementSteps)
const {
9120 (VT == MVT::f64 && Subtarget.
hasFRE()) ||
9124 (VT == MVT::v4f64 && Subtarget.
hasQPX())) {
9125 TargetRecip Recips = DCI.DAG.getTarget().Options.Reciprocals;
9136 bool PPCTargetLowering::combineRepeatedFPDivisors(
unsigned NumUsers)
const {
9149 return NumUsers > 2;
9154 return NumUsers > 1;
9159 unsigned Bytes,
int Dist,
9169 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
9170 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
9173 if (FS != BFS || FS != (
int)Bytes)
return false;
9179 cast<ConstantSDNode>(Loc.
getOperand(1))->getSExtValue() == Dist*Bytes)
9185 int64_t Offset1 = 0;
9186 int64_t Offset2 = 0;
9189 if (isGA1 && isGA2 && GV1 == GV2)
9190 return Offset1 == (Offset2 + Dist*Bytes);
9197 unsigned Bytes,
int Dist,
9200 EVT VT =
LS->getMemoryVT();
9207 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
9208 default:
return false;
9209 case Intrinsic::ppc_qpx_qvlfd:
9210 case Intrinsic::ppc_qpx_qvlfda:
9213 case Intrinsic::ppc_qpx_qvlfs:
9214 case Intrinsic::ppc_qpx_qvlfsa:
9217 case Intrinsic::ppc_qpx_qvlfcd:
9218 case Intrinsic::ppc_qpx_qvlfcda:
9221 case Intrinsic::ppc_qpx_qvlfcs:
9222 case Intrinsic::ppc_qpx_qvlfcsa:
9225 case Intrinsic::ppc_qpx_qvlfiwa:
9226 case Intrinsic::ppc_qpx_qvlfiwz:
9227 case Intrinsic::ppc_altivec_lvx:
9228 case Intrinsic::ppc_altivec_lvxl:
9229 case Intrinsic::ppc_vsx_lxvw4x:
9232 case Intrinsic::ppc_vsx_lxvd2x:
9235 case Intrinsic::ppc_altivec_lvebx:
9238 case Intrinsic::ppc_altivec_lvehx:
9241 case Intrinsic::ppc_altivec_lvewx:
9251 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
9252 default:
return false;
9253 case Intrinsic::ppc_qpx_qvstfd:
9254 case Intrinsic::ppc_qpx_qvstfda:
9257 case Intrinsic::ppc_qpx_qvstfs:
9258 case Intrinsic::ppc_qpx_qvstfsa:
9261 case Intrinsic::ppc_qpx_qvstfcd:
9262 case Intrinsic::ppc_qpx_qvstfcda:
9265 case Intrinsic::ppc_qpx_qvstfcs:
9266 case Intrinsic::ppc_qpx_qvstfcsa:
9269 case Intrinsic::ppc_qpx_qvstfiw:
9270 case Intrinsic::ppc_qpx_qvstfiwa:
9271 case Intrinsic::ppc_altivec_stvx:
9272 case Intrinsic::ppc_altivec_stvxl:
9273 case Intrinsic::ppc_vsx_stxvw4x:
9276 case Intrinsic::ppc_vsx_stxvd2x:
9279 case Intrinsic::ppc_altivec_stvebx:
9282 case Intrinsic::ppc_altivec_stvehx:
9285 case Intrinsic::ppc_altivec_stvewx:
9312 while (!Queue.empty()) {
9313 SDNode *ChainNext = Queue.pop_back_val();
9314 if (!Visited.
insert(ChainNext).second)
9317 if (
MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
9321 if (!Visited.count(ChainLD->getChain().getNode()))
9322 Queue.push_back(ChainLD->getChain().getNode());
9324 for (
const SDUse &O : ChainNext->
ops())
9325 if (!Visited.count(O.getNode()))
9326 Queue.push_back(O.getNode());
9328 LoadRoots.
insert(ChainNext);
9340 IE = LoadRoots.end();
I !=
IE; ++
I) {
9341 Queue.push_back(*
I);
9343 while (!Queue.empty()) {
9344 SDNode *LoadRoot = Queue.pop_back_val();
9345 if (!Visited.
insert(LoadRoot).second)
9348 if (
MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
9353 UE = LoadRoot->
use_end(); UI != UE; ++UI)
9354 if (((isa<MemSDNode>(*UI) &&
9355 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
9357 Queue.push_back(*UI);
9364 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(
SDNode *N,
9365 DAGCombinerInfo &DCI)
const {
9369 assert(Subtarget.
useCRBits() &&
"Expecting to be tracking CR bits");
9409 APInt Op1Zero, Op1One;
9410 APInt Op2Zero, Op2One;
9419 if (Op1Zero != Op2Zero || Op1One != Op2One)
9454 for (
unsigned i = 0; i < 2; ++i) {
9470 while (!BinOps.
empty()) {
9479 for (
unsigned i = 0, ie = BinOp.
getNumOperands(); i != ie; ++i) {
9513 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++i) {
9514 if (isa<ConstantSDNode>(Inputs[i]))
9518 UE = Inputs[i].getNode()->use_end();
9521 if (User != N && !Visited.
count(User))
9540 for (
unsigned i = 0, ie = PromOps.
size(); i != ie; ++i) {
9542 UE = PromOps[i].getNode()->use_end();
9545 if (User != N && !Visited.
count(User))
9565 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++i) {
9568 if (isa<ConstantSDNode>(Inputs[i]))
9579 while (!PromOps.
empty()) {
9587 if (!isa<ConstantSDNode>(PromOp.
getOperand(0)) &&
9595 if (isa<ConstantSDNode>(RepValue))
9604 default: C = 0;
break;
9609 if ((!isa<ConstantSDNode>(PromOp.
getOperand(C)) &&
9611 (!isa<ConstantSDNode>(PromOp.
getOperand(C+1)) &&
9625 for (
unsigned i = 0; i < 2; ++i)
9626 if (isa<ConstantSDNode>(Ops[C+i]))
9642 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(
SDNode *N,
9643 DAGCombinerInfo &DCI)
const {
9682 while (!BinOps.
empty()) {
9691 for (
unsigned i = 0, ie = BinOp.
getNumOperands(); i != ie; ++i) {
9722 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++i) {
9723 if (isa<ConstantSDNode>(Inputs[i]))
9727 UE = Inputs[i].getNode()->use_end();
9730 if (User != N && !Visited.count(User))
9737 SelectTruncOp[0].insert(std::make_pair(User,
9741 SelectTruncOp[0].insert(std::make_pair(User,
9744 SelectTruncOp[1].insert(std::make_pair(User,
9750 for (
unsigned i = 0, ie = PromOps.
size(); i != ie; ++i) {
9752 UE = PromOps[i].getNode()->use_end();
9755 if (User != N && !Visited.count(User))
9762 SelectTruncOp[0].insert(std::make_pair(User,
9766 SelectTruncOp[0].insert(std::make_pair(User,
9769 SelectTruncOp[1].insert(std::make_pair(User,
9776 bool ReallyNeedsExt =
false;
9780 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++i) {
9781 if (isa<ConstantSDNode>(Inputs[i]))
9785 Inputs[i].getOperand(0).getValueSizeInBits();
9786 assert(PromBits < OpBits &&
"Truncation not to a smaller bit count?");
9791 OpBits-PromBits))) ||
9794 (OpBits-(PromBits-1)))) {
9795 ReallyNeedsExt =
true;
9803 for (
unsigned i = 0, ie = Inputs.
size(); i != ie; ++i) {
9807 if (isa<ConstantSDNode>(Inputs[i]))
9810 SDValue InSrc = Inputs[i].getOperand(0);
9828 while (!PromOps.
empty()) {
9834 default: C = 0;
break;
9839 if ((!isa<ConstantSDNode>(PromOp.
getOperand(C)) &&
9841 (!isa<ConstantSDNode>(PromOp.
getOperand(C+1)) &&
9855 if ((SelectTruncOp[0].count(PromOp.
getNode()) &&
9857 (SelectTruncOp[1].count(PromOp.
getNode()) &&
9868 for (
unsigned i = 0; i < 2; ++i) {
9869 if (!isa<ConstantSDNode>(Ops[C+i]))
9886 auto SI0 = SelectTruncOp[0].
find(PromOp.
getNode());
9887 if (SI0 != SelectTruncOp[0].
end())
9889 auto SI1 = SelectTruncOp[1].
find(PromOp.
getNode());
9890 if (SI1 != SelectTruncOp[1].
end())
9899 if (!ReallyNeedsExt)
9911 "Invalid extension type");
9921 DAGCombinerInfo &DCI)
const {
9924 "Need an int -> FP conversion node here");
9947 "UINT_TO_FP is supported only with FPCVT");
9968 DCI.AddToWorklist(Src.
getNode());
9984 DCI.AddToWorklist(FP.
getNode());
10030 SDValue LoadOps[] = { Chain, Base };
10033 LoadOps, VecTy, MMO);
10086 SDValue StoreOps[] = { Chain, Swap, Base };
10089 StoreOps, VecTy, MMO);
10102 if (C->isNullValue())
10108 if (C->isNullValue())
10114 if (C->isNullValue() ||
10115 C->isAllOnesValue())
10122 return DAGCombineExtBoolTrunc(N, DCI);
10126 return DAGCombineTruncBoolExt(N, DCI);
10129 return combineFPToIntToFP(N, DCI);
10132 if (Subtarget.
hasSTFIWX() && !cast<StoreSDNode>(
N)->isTruncatingStore() &&
10151 cast<StoreSDNode>(
N)->getMemoryVT(),
10152 cast<StoreSDNode>(
N)->getMemOperand());
10158 if (cast<StoreSDNode>(N)->isUnindexed() &&
10176 Ops, cast<StoreSDNode>(
N)->getMemoryVT(),
10177 cast<StoreSDNode>(
N)->getMemOperand());
10196 if (VT.isSimple()) {
10197 MVT LoadVT = VT.getSimpleVT();
10247 MVT PermCntlTy, PermTy, LDTy;
10249 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr :
10250 Intrinsic::ppc_altivec_lvsl;
10251 IntrLD = Intrinsic::ppc_altivec_lvx;
10252 IntrPerm = Intrinsic::ppc_altivec_vperm;
10257 Intr = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
10258 Intrinsic::ppc_qpx_qvlpcls;
10259 IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
10260 Intrinsic::ppc_qpx_qvlfs;
10261 IntrPerm = Intrinsic::ppc_qpx_qvfperm;
10282 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
10286 BaseLoadOps, LDTy, BaseMMO);
10294 int IncOffset = VT.getSizeInBits() / 8;
10295 int IncValue = IncOffset;
10312 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
10316 ExtraLoadOps, LDTy, ExtraMMO);
10327 if (isLittleEndian)
10329 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
10332 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
10351 unsigned IID = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
10352 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
10353 : Intrinsic::ppc_altivec_lvsl);
10354 if ((IID == Intr ||
10355 IID == Intrinsic::ppc_qpx_qvlpcld ||
10356 IID == Intrinsic::ppc_qpx_qvlpcls) &&
10360 int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
10373 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
10383 if (isa<ConstantSDNode>(Add->
getOperand(1))) {
10386 UE = BasePtr->
use_end(); UI != UE; ++UI) {
10387 if (UI->getOpcode() ==
ISD::ADD &&
10388 isa<ConstantSDNode>(UI->getOperand(1)) &&
10389 (cast<ConstantSDNode>(Add->
getOperand(1))->getZExtValue() -
10390 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
10391 (1ULL << Bits) == 0) {
10396 cast<ConstantSDNode>(
VI->getOperand(0))->getZExtValue() == IID) {
10410 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
10413 case Intrinsic::ppc_vsx_lxvw4x:
10414 case Intrinsic::ppc_vsx_lxvd2x:
10423 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
10426 case Intrinsic::ppc_vsx_stxvw4x:
10427 case Intrinsic::ppc_vsx_stxvd2x:
10482 SDNode *VCMPoNode =
nullptr;
10503 SDNode *FlagUser =
nullptr;
10505 FlagUser ==
nullptr; ++UI) {
10506 assert(UI != VCMPoNode->
use_end() &&
"Didn't find user!");
10508 for (
unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
10509 if (User->getOperand(i) ==
SDValue(VCMPoNode, 1)) {
10519 return SDValue(VCMPoNode, 0);
10528 cast<ConstantSDNode>(Cond.
getOperand(1))->getZExtValue() ==
10529 Intrinsic::ppc_is_decremented_ctr_nonzero) {
10535 "Counter decrement has more than one use");
10555 Intrinsic::ppc_is_decremented_ctr_nonzero &&
10557 !cast<ConstantSDNode>(LHS.
getOperand(1))->getConstantIntValue()->
10562 cast<ConstantSDNode>(LHS.
getOperand(1))->getZExtValue() ==
10563 Intrinsic::ppc_is_decremented_ctr_nonzero &&
10564 isa<ConstantSDNode>(RHS)) {
10566 "Counter decrement comparison is not EQ or NE");
10568 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
10576 "Counter decrement has more than one use");
10588 assert(isDot &&
"Can't compare against a vector result!");
10592 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
10593 if (Val != 0 && Val != 1) {
10601 bool BranchOnWhenPredTrue = (CC ==
ISD::SETEQ) ^ (Val == 0);
10614 switch (cast<ConstantSDNode>(LHS.
getOperand(1))->getZExtValue()) {
10645 std::vector<SDNode *> *Created)
const {
10650 if ((VT != MVT::i32 && VT !=
MVT::i64) ||
10651 !(Divisor.
isPowerOf2() || (-Divisor).isPowerOf2()))
10657 bool IsNegPow2 = (-Divisor).isPowerOf2();
10663 Created->push_back(Op.
getNode());
10668 Created->push_back(Op.
getNode());
10682 unsigned Depth)
const {
10689 KnownZero = 0xFFFF0000;
10693 switch (cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue()) {
10695 case Intrinsic::ppc_altivec_vcmpbfp_p:
10696 case Intrinsic::ppc_altivec_vcmpeqfp_p:
10697 case Intrinsic::ppc_altivec_vcmpequb_p:
10698 case Intrinsic::ppc_altivec_vcmpequh_p:
10699 case Intrinsic::ppc_altivec_vcmpequw_p:
10700 case Intrinsic::ppc_altivec_vcmpequd_p:
10701 case Intrinsic::ppc_altivec_vcmpgefp_p:
10702 case Intrinsic::ppc_altivec_vcmpgtfp_p:
10703 case Intrinsic::ppc_altivec_vcmpgtsb_p:
10704 case Intrinsic::ppc_altivec_vcmpgtsh_p:
10705 case Intrinsic::ppc_altivec_vcmpgtsw_p:
10706 case Intrinsic::ppc_altivec_vcmpgtsd_p:
10707 case Intrinsic::ppc_altivec_vcmpgtub_p:
10708 case Intrinsic::ppc_altivec_vcmpgtuh_p:
10709 case Intrinsic::ppc_altivec_vcmpgtuw_p:
10710 case Intrinsic::ppc_altivec_vcmpgtud_p:
10736 uint64_t LoopSize = 0;
10738 for (
auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J)
10741 if (LoopSize > 16 && LoopSize <= 32)
10755 if (Constraint.
size() == 1) {
10756 switch (Constraint[0]) {
10773 }
else if (Constraint ==
"wc") {
10775 }
else if (Constraint ==
"wa" || Constraint ==
"wd" ||
10776 Constraint ==
"wf" || Constraint ==
"ws") {
10792 if (!CallOperandVal)
10799 else if ((
StringRef(constraint) ==
"wa" ||
10807 switch (*constraint) {
10837 std::pair<unsigned, const TargetRegisterClass *>
10841 if (Constraint.
size() == 1) {
10843 switch (Constraint[0]) {
10846 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
10847 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
10850 return std::make_pair(0U, &PPC::G8RCRegClass);
10851 return std::make_pair(0U, &PPC::GPRCRegClass);
10853 if (VT ==
MVT::f32 || VT == MVT::i32)
10854 return std::make_pair(0U, &PPC::F4RCRegClass);
10855 if (VT == MVT::f64 || VT ==
MVT::i64)
10856 return std::make_pair(0U, &PPC::F8RCRegClass);
10857 if (VT == MVT::v4f64 && Subtarget.
hasQPX())
10858 return std::make_pair(0U, &PPC::QFRCRegClass);
10860 return std::make_pair(0U, &PPC::QSRCRegClass);
10863 if (VT == MVT::v4f64 && Subtarget.
hasQPX())
10864 return std::make_pair(0U, &PPC::QFRCRegClass);
10866 return std::make_pair(0U, &PPC::QSRCRegClass);
10867 return std::make_pair(0U, &PPC::VRRCRegClass);
10869 return std::make_pair(0U, &PPC::CRRCRegClass);
10871 }
else if (Constraint ==
"wc") {
10872 return std::make_pair(0U, &PPC::CRBITRCRegClass);
10873 }
else if (Constraint ==
"wa" || Constraint ==
"wd" ||
10874 Constraint ==
"wf") {
10875 return std::make_pair(0U, &PPC::VSRCRegClass);
10876 }
else if (Constraint ==
"ws") {
10878 return std::make_pair(0U, &PPC::VSSRCRegClass);
10880 return std::make_pair(0U, &PPC::VSFRCRegClass);
10883 std::pair<unsigned, const TargetRegisterClass *> R =
10893 PPC::GPRCRegClass.contains(R.first))
10895 PPC::sub_32, &PPC::G8RCRegClass),
10896 &PPC::G8RCRegClass);
10900 R.first = PPC::CR0;
10901 R.second = &PPC::CRRCRegClass;
10911 std::string &Constraint,
10912 std::vector<SDValue>&Ops,
10917 if (Constraint.length() > 1)
return;
10919 char Letter = Constraint[0];
10943 if (isShiftedUInt<16, 16>(Value))
10947 if (isShiftedInt<16, 16>(Value))
10976 Ops.push_back(Result);
10988 unsigned AS)
const {
11002 switch (AM.
Scale) {
11033 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
11039 bool isPPC64 = Subtarget.
isPPC64();
11043 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
11053 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
11061 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
11074 FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
11076 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
11091 bool isPPC64 = Subtarget.
isPPC64();
11094 if ((isPPC64 && VT !=
MVT::i64 && VT != MVT::i32) ||
11095 (!isPPC64 && VT != MVT::i32))
11100 .Case(
"r1", is64Bit ? PPC::X1 : PPC::R1)
11101 .
Case(
"r2", (isDarwinABI || isPPC64) ? 0 : PPC::R2)
11102 .
Case(
"r13", (!isPPC64 && isDarwinABI) ? 0 :
11103 (is64Bit ? PPC::X13 : PPC::R13))
11119 unsigned Intrinsic)
const {
11121 switch (Intrinsic) {
11122 case Intrinsic::ppc_qpx_qvlfd:
11123 case Intrinsic::ppc_qpx_qvlfs:
11124 case Intrinsic::ppc_qpx_qvlfcd:
11125 case Intrinsic::ppc_qpx_qvlfcs:
11126 case Intrinsic::ppc_qpx_qvlfiwa:
11127 case Intrinsic::ppc_qpx_qvlfiwz:
11128 case Intrinsic::ppc_altivec_lvx:
11129 case Intrinsic::ppc_altivec_lvxl:
11130 case Intrinsic::ppc_altivec_lvebx:
11131 case Intrinsic::ppc_altivec_lvehx:
11132 case Intrinsic::ppc_altivec_lvewx:
11133 case Intrinsic::ppc_vsx_lxvd2x:
11134 case Intrinsic::ppc_vsx_lxvw4x: {
11136 switch (Intrinsic) {
11137 case Intrinsic::ppc_altivec_lvebx:
11140 case Intrinsic::ppc_altivec_lvehx:
11143 case Intrinsic::ppc_altivec_lvewx:
11146 case Intrinsic::ppc_vsx_lxvd2x:
11149 case Intrinsic::ppc_qpx_qvlfd:
11152 case Intrinsic::ppc_qpx_qvlfs:
11155 case Intrinsic::ppc_qpx_qvlfcd:
11158 case Intrinsic::ppc_qpx_qvlfcs:
11177 case Intrinsic::ppc_qpx_qvlfda:
11178 case Intrinsic::ppc_qpx_qvlfsa:
11179 case Intrinsic::ppc_qpx_qvlfcda:
11180 case Intrinsic::ppc_qpx_qvlfcsa:
11181 case Intrinsic::ppc_qpx_qvlfiwaa:
11182 case Intrinsic::ppc_qpx_qvlfiwza: {
11184 switch (Intrinsic) {
11185 case Intrinsic::ppc_qpx_qvlfda:
11188 case Intrinsic::ppc_qpx_qvlfsa:
11191 case Intrinsic::ppc_qpx_qvlfcda:
11194 case Intrinsic::ppc_qpx_qvlfcsa:
11213 case Intrinsic::ppc_qpx_qvstfd:
11214 case Intrinsic::ppc_qpx_qvstfs:
11215 case Intrinsic::ppc_qpx_qvstfcd:
11216 case Intrinsic::ppc_qpx_qvstfcs:
11217 case Intrinsic::ppc_qpx_qvstfiw:
11218 case Intrinsic::ppc_altivec_stvx:
11219 case Intrinsic::ppc_altivec_stvxl:
11220 case Intrinsic::ppc_altivec_stvebx:
11221 case Intrinsic::ppc_altivec_stvehx:
11222 case Intrinsic::ppc_altivec_stvewx:
11223 case Intrinsic::ppc_vsx_stxvd2x:
11224 case Intrinsic::ppc_vsx_stxvw4x: {
11226 switch (Intrinsic) {
11227 case Intrinsic::ppc_altivec_stvebx:
11230 case Intrinsic::ppc_altivec_stvehx:
11233 case Intrinsic::ppc_altivec_stvewx:
11236 case Intrinsic::ppc_vsx_stxvd2x:
11239 case Intrinsic::ppc_qpx_qvstfd:
11242 case Intrinsic::ppc_qpx_qvstfs:
11245 case Intrinsic::ppc_qpx_qvstfcd:
11248 case Intrinsic::ppc_qpx_qvstfcs:
11267 case Intrinsic::ppc_qpx_qvstfda:
11268 case Intrinsic::ppc_qpx_qvstfsa:
11269 case Intrinsic::ppc_qpx_qvstfcda:
11270 case Intrinsic::ppc_qpx_qvstfcsa:
11271 case Intrinsic::ppc_qpx_qvstfiwa: {
11273 switch (Intrinsic) {
11274 case Intrinsic::ppc_qpx_qvstfda:
11277 case Intrinsic::ppc_qpx_qvstfsa:
11280 case Intrinsic::ppc_qpx_qvstfcda:
11283 case Intrinsic::ppc_qpx_qvstfcsa:
11321 unsigned DstAlign,
unsigned SrcAlign,
11322 bool IsMemset,
bool ZeroMemset,
11329 if (Subtarget.
hasQPX() && Size >= 32 && (!IsMemset || Size >= 64) &&
11330 (!SrcAlign || SrcAlign >= 32) && (!DstAlign || DstAlign >= 32) &&
11338 (((!SrcAlign || SrcAlign >= 16) && (!DstAlign || DstAlign >= 16)) ||
11357 if (BitSize == 0 || BitSize > 64)
11367 return NumBits1 == 64 && NumBits2 == 32;
11375 return NumBits1 == 64 && NumBits2 == 32;
11381 if (
LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
11414 bool *
Fast)
const {
11428 if (Subtarget.
hasVSX()) {
11469 static const MCPhysReg ScratchRegs[] = {
11470 PPC::X12, PPC::LR8, PPC::CTR8, 0
11473 return ScratchRegs;
11478 EVT VT ,
unsigned DefinedValues)
const {
11482 if (Subtarget.
hasQPX()) {
bool hasType(MVT vt) const
hasType - return true if this TargetRegisterClass has the ValueType vt.
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
SDValue getTruncStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT TVT, bool isNonTemporal, bool isVolatile, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
cl::opt< bool > ANDIGlueBug
X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
void setFrameAddressIsTaken(bool T)
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
unsigned getValueSizeInBits(unsigned ResNo) const
Returns MVT::getSizeInBits(getValueType(ResNo)).
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
void push_back(const T &Elt)
const MachineFunction * getParent() const
getParent - Return the MachineFunction containing this basic block.
The memory access reads data.
A parsed version of the target data layout string in and methods for querying it. ...
const_iterator end(StringRef path)
Get end iterator over path.
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
MachineBasicBlock * EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode) const
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
Return with a flag operand, matched by 'blr'.
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers.
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
The memory access writes data.
static Instruction * callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id)
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
void setVarArgsNumGPR(unsigned Num)
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
bool use64BitRegs() const
use64BitRegs - Return true if in 64-bit mode or if we should use 64-bit registers in 32-bit mode when...
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
unsigned getRegisterByName(const char *RegName, EVT VT, SelectionDAG &DAG) const override
Return the register ID of the name passed in.
unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
LLVMContext * getContext() const
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N)
QVFPERM = This corresponds to the QPX qvfperm instruction.
uint64_t getZExtValue() const
Get zero extended value.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, SDLoc DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
static cl::opt< bool > DisablePPCUnaligned("disable-ppc-unaligned", cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden)
SDValue getIndexedLoad(SDValue OrigLoad, SDLoc dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
size_t size() const
size - Get the string size.
static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC, SelectionDAG &DAG)
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
BR_CC - Conditional branch.
static void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, SDLoc dl, bool isPPC64, int SPDiff, unsigned NumBytes, SDValue LROp, SDValue FPOp, bool isDarwinABI, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
LocInfo getLocInfo() const
GPRC = address of GLOBAL_OFFSET_TABLE.
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
bool isLittleEndian() const
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
A Module instance is used to store all the information related to an LLVM module. ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode) const
const TargetMachine & getTargetMachine() const
bool isAtLeastAcquire(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as acquire (i.e.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG, SDLoc dl, EVT DestVT=MVT::Other)
BuildIntrinsicOp - Return a unary operator intrinsic node with the specified intrinsic ID...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
SDValue getMergeValues(ArrayRef< SDValue > Ops, SDLoc dl)
Create a MERGE_VALUES node from the given operands.
int getSplatIndex() const
void setLRStoreRequired()
const TargetMachine & getTarget() const
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue OldRetAddr, SDValue OldFP, int SPDiff, bool isPPC64, bool isDarwinABI, SDLoc dl)
EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to the appropriate stack sl...
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
A Stackmap instruction captures the location of live variables at its position in the instruction str...
const TargetSubtargetInfo & getSubtarget() const
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
CallInst - This class represents a function call, abstracting a target machine's calling convention...
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG)
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
const GlobalValue * getGlobal() const
QBRC, CHAIN = QVLFSb CHAIN, Ptr The 4xf32 load used for v4i1 constants.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
bool hasLazyResolverStub(const GlobalValue *GV) const
hasLazyResolverStub - Return true if accesses to the specified global have to go through a dyld lazy ...
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
SDValue getSelectCC(SDLoc DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
void setFramePointerSaveIndex(int Idx)
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
unsigned getSizeInBits() const
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
bool isDoubleTy() const
isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type.
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
unsigned getByValSize() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
unsigned getNumOperands() const
Return the number of values used by this operation.
static unsigned CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotAlignment - Calculates the alignment of this argument on the stack.
bool isDarwin() const
isDarwin - True if this is any darwin platform.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const Function * getParent() const
Return the enclosing method, or null if none.
unsigned getNumOperands() const
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
virtual bool isZExtFree(Type *, Type *) const
Return true if any actual instruction that defines a value of type Ty1 implicitly zero-extends the va...
int64_t getOffset() const
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB)
transferSuccessorsAndUpdatePHIs - Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor blocks which refer to fromMBB to refer to this.
const SDValue & getOperand(unsigned Num) const
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
unsigned getVarArgsNumGPR() const
static bool isLocalCall(const SDValue &Callee)
CALL - A direct function call.
static MachinePointerInfo getConstantPool()
getConstantPool - Return a MachinePointerInfo record that refers to the constant pool.
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
StringSwitch & Case(const char(&S)[N], const T &Value)
unsigned getValNo() const
const SDValue & getBasePtr() const
SDValue getConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offs=0, bool isT=false, unsigned char TargetFlags=0)
void setVarArgsNumFPR(unsigned Num)
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
getMachineMemOperand - Allocate a new MachineMemOperand.
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
int getReturnAddrSaveIndex() const
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
bool isUnsignedIntSetCC(CondCode Code)
isUnsignedIntSetCC - Return true if this is a setcc instruction that performs an unsigned comparison ...
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason, bool gen_crash_diag=true)
Reports a serious error, calling any installed error handler.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
SDValue getExternalSymbol(const char *Sym, EVT VT)
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
SDValue getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
static MachinePointerInfo getFixedStack(int FI, int64_t offset=0)
getFixedStack - Return a MachinePointerInfo record that refers to the the specified FrameIndex...
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering, unsigned NumBytes)
EnsureStackAlignment - Round stack frame size up from NumBytes to ensure minimum alignment required f...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
GlobalBaseReg - On Darwin, this node represents the result of the mflr at function entry...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align=1, bool *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation...
bool isFPExtFree(EVT VT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
COPY - Target-independent register copy.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
bool isVector() const
isVector - Return true if this is a vector value type.
bool isMacOSX() const
isMacOSX - Is this a Mac OS X triple.
BlockAddress - The address of a basic block.
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
void setVarArgsStackOffset(int Offset)
MO_PLT_OR_STUB - On a symbol operand "FOO", this indicates that the reference is actually to the "FOO...
MachineMemOperand - A description of a memory reference used in the backend.
std::string getEVTString() const
getEVTString - This function returns value type as a string, e.g.
static std::string getRecipOp(const char *Base, EVT VT)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
int64_t getOffset() const
const HexagonInstrInfo * TII
static Type * getFloatTy(LLVMContext &C)
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr *MI, MachineBasicBlock *MBB) const
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
StructType - Class to represent struct types.
static cl::opt< bool > DisablePPCPreinc("disable-ppc-preinc", cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden)
Base class for LoadSDNode and StoreSDNode.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT, SelectionDAG &DAG, SDLoc dl)
BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified amount.
MachineFunction & getMachineFunction() const
unsigned getMinReservedArea() const
static void advance(T &it, size_t Val)
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign, unsigned MaxMaxAlign)
getMaxByValAlign - Helper for getByValTypeAlignment to determine the desired ByVal argument alignment...
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
static Constant * get(ArrayRef< Constant * > V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
const TargetRegisterClass * getRegClass(unsigned Reg) const
getRegClass - Return the register class of the specified virtual register.
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
int getVarArgsFrameIndex() const
Reg
All possible values of the reg field in the ModR/M byte.
void setCondCodeAction(ISD::CondCode CC, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
static cl::opt< bool > DisablePPCFloatInVariadic("disable-ppc-float-in-variadic", cl::desc("disable saving float registers for va_start on PPC"), cl::Hidden)
MachinePointerInfo getWithOffset(int64_t O) const
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
Direct move from a GPR to a VSX register (algebraic)
X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
int getMaskElt(unsigned Idx) const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
QVALIGNI = This corresponds to the QPX qvaligni instruction.
const MachineInstrBuilder & addImm(int64_t Val) const
addImm - Add a new immediate operand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
bool getBoolValue() const
Convert APInt to a boolean value.
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT)
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
void assign(size_type NumElts, const T &Elt)
unsigned getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
unsigned getReturnSaveOffset() const
getReturnSaveOffset - Return the previous frame offset to save the return address.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
static const unsigned PerfectShuffleTable[6561+1]
bool isInConsecutiveRegs() const
load Combine Adjacent Loads
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
isMacOSXVersionLT - Comparison function for checking OS X version compatibility, which handles suppor...
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
unsigned getLocReg() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned LHSStart, unsigned RHSStart)
isVMerge - Common function, used to match vmrg* shuffles.
static bool getAltivecCompareInfo(SDValue Intrin, int &CompareOpc, bool &isDot, const PPCSubtarget &Subtarget)
getAltivecCompareInfo - Given an intrinsic, return false if it is not an altivec comparison.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
ValTy * getCalledValue() const
getCalledValue - Return the pointer to function that is being called.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
G8RC = ADDIS_TLSGD_HA X2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
ArrayType - Class to represent array types.
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const TargetRegisterClass *RC) const
getMatchingSuperReg - Return a super-register of the specified register Reg so its sub-register of in...
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2, const int *MaskElts)
Return an ISD::VECTOR_SHUFFLE node.
MO_NLP_HIDDEN_FLAG - If this bit is set, the symbol reference is to a symbol with hidden visibility...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
SmallVector< ISD::OutputArg, 32 > Outs
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
bool hasInvariantFunctionDescriptors() const
bool isLittleEndian() const
Layout endianness...
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
const SDValue & getBasePtr() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
MachineBasicBlock * emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a targte-dependent sequence of memory operands that...
static const MCPhysReg FPR[]
FPR - The set of FP registers that should be allocated for arguments, on Darwin.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
getOptimalMemOpType - Returns the target specific optimal type for load and store operations as a res...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
EVT getMemoryVT() const
Return the type of the in-memory value.
bool isInConsecutiveRegsLast() const
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
int getFramePointerSaveIndex() const
SDValue getAnyExtOrTrunc(SDValue Op, SDLoc DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is 0.0 or -0.0.
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VRGL* instruction with the ...
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
bool isSignedIntSetCC(CondCode Code)
isSignedIntSetCC - Return true if this is a setcc instruction that performs a signed comparison when ...
bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
const BasicBlock * getBasicBlock() const
getBasicBlock - Return the LLVM basic block that this instance corresponded to originally.
UNDEF - An undefined node.
This class is used to represent ISD::STORE nodes.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
uint32_t FloatToBits(float Float)
FloatToBits - This function takes a float and returns the bit equivalent 32-bit integer.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
TargetInstrInfo - Interface to description of machine instruction set.
LLVM_CONSTEXPR size_t array_lengthof(T(&)[N])
Find the length of an array.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
void setTailCallSPDelta(int size)
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
mmo_iterator memoperands_end() const
SDNode * getNode() const
get the SDNode which holds the desired result
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
bundle_iterator< MachineInstr, instr_iterator > iterator
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
A switch()-like statement whose cases are string literals.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
const MachineInstrBuilder & setMemRefs(MachineInstr::mmo_iterator b, MachineInstr::mmo_iterator e) const
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
Patchable call instruction - this instruction represents a call to a constant address, followed by a series of NOPs.
Control flow instructions. These all have token chains.
static void LowerMemOpCallTo(SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg, SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64, bool isTailCall, bool isVector, SmallVectorImpl< SDValue > &MemOpChains, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments, SDLoc dl)
LowerMemOpCallTo - Store the argument to the stack or remember it in case of tail calls...
const SDValue & getBasePtr() const
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, SDLoc dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
FSEL - Traditional three-operand fsel node.
unsigned getFramePointerSaveOffset() const
getFramePointerSaveOffset - Return the previous frame offset to save the frame pointer.
MVT - Machine Value Type.
LLVM Basic Block Representation.
const Triple & getTargetTriple() const
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
Simple binary floating point operators.
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
void setReturnAddrSaveIndex(int idx)
bool isNonTemporal() const
bool isVectorTy() const
isVectorTy - True if this is an instance of VectorType.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
This is an important base class in LLVM.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain, ISD::ArgFlagsTy Flags, SelectionDAG &DAG, SDLoc dl)
CreateCopyOfByValArgument - Make a copy of an aggregate at address specified by "Src" to address "Dst...
virtual unsigned getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
MO_NLP_FLAG - If this bit is set, the symbol reference is actually to the non_lazy_ptr for the global...
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
G8RC = ADDIS_DTPREL_HA X3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
bool isVector() const
isVector - Return true if this is a vector value type.
Direct move from a VSX register to a GPR.
static bool is64Bit(const char *name)
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
const Constant * getConstVal() const
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
const MachineOperand & getOperand(unsigned i) const
unsigned getVarArgsNumFPR() const
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall, unsigned ParamSize)
CalculateTailCallSPDiff - Get the amount the stack pointer has to be adjusted to accommodate the argu...
bool isFloatTy() const
isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
unsigned getLiveInVirtReg(unsigned PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in physical ...
ConstantFP - Floating Point Values [float, double].
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the specified isSplatShuffleMask...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
STFIWX - The STFIWX instruction.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target...
FCFID - The FCFID instruction, taking an f64 operand and producing and f64 value containing the FP re...
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
void AddToWorklist(SDNode *N)
const PPCFrameLowering * getFrameLowering() const override
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
static Type * getVoidTy(LLVMContext &C)
This class provides iterator support for SDUse operands that use a specific SDNode.
static void CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64, SDValue Arg, int SPDiff, unsigned ArgOffset, SmallVectorImpl< TailCallArgumentInfo > &TailCallArguments)
CalculateTailCallArgDest - Remember Argument for later processing.
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT)
SDValue getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
static bool isIntS16Immediate(SDNode *N, short &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate...
SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT, bool isOpaque=false)
unsigned getBitWidth() const
Return the number of bits in the APInt.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
void setVarArgsFrameIndex(int Index)
unsigned getOpcode() const
static unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag, SDValue &Chain, SDValue CallSeqStart, SDLoc dl, int SPDiff, bool isTailCall, bool IsPatchPoint, bool hasNest, SmallVectorImpl< std::pair< unsigned, SDValue > > &RegsToPass, SmallVectorImpl< SDValue > &Ops, std::vector< EVT > &NodeTys, ImmutableCallSite *CS, const PPCSubtarget &Subtarget)
TRAP - Trapping instruction.
static const MCPhysReg QFPR[]
QFPR - The set of QPX registers that should be allocated for arguments.
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
bool isEnabled(const StringRef &Key) const
Return true if the reciprocal operation has been enabled by default or from the command-line.
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
static mvt_range vector_valuetypes()
bool has64BitSupport() const
has64BitSupport - Return true if the selected CPU supports 64-bit instructions, regardless of whether...
G8RC = ADDIS_TLSLD_HA X2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
static void StoreTailCallArgumentsToStackSlot(SelectionDAG &DAG, SDValue Chain, const SmallVectorImpl< TailCallArgumentInfo > &TailCallArgs, SmallVectorImpl< SDValue > &MemOpChains, SDLoc dl)
StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
QVESPLATI = This corresponds to the QPX qvesplati instruction.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
const SDValue & getValue() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned NoInfsFPMath
NoInfsFPMath - This flag is enabled when the -enable-no-infs-fp-math flag is specified on the command...
PPCTargetMachine - Common code between 32-bit and 64-bit PowerPC targets.
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Bit counting operators with an undefined result for zero inputs.
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
BuildMI - Builder interface.
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1...
EVT - Extended Value Type.
static UndefValue * get(Type *T)
get() - Static factory methods - Return an 'undef' object of the specified type.
std::vector< ArgListEntry > ArgListTy
void setMinReservedArea(unsigned size)
static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT, SelectionDAG &DAG, SDLoc dl)
BuildSplatI - Build a canonical splati of Val with an element size of SplatSize.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="")
This structure contains all information that is necessary for lowering calls.
ISD::MemIndexedMode getAddressingMode() const
Return the addressing mode for this load or store: unindexed, pre-inc, pre-dec, post-inc, or post-dec.
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
MachinePointerInfo - This class contains a discriminated union of information about pointers in memor...
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
void setUseUnderscoreLongJmp(bool Val)
Indicate whether this target prefers to use _longjmp to implement llvm.longjmp or the version without...
Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
const BlockAddress * getBlockAddress() const
bool hasPartwordAtomics() const
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
SDValue CreateStackTemporary(EVT VT, unsigned minAlign=1)
Create a stack temporary, suitable for holding the specified value type.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
const MachinePointerInfo & getPointerInfo() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
const MDNode * getRanges() const
Returns the Ranges that describes the dereference.
unsigned getByValAlign() const
const SDValue & getOffset() const
unsigned getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only...
ArrayRef< int > getMask() const
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
int getVarArgsStackOffset() const
const uint32_t * getNoPreservedMask() const
QBFLT = Access the underlying QPX floating-point boolean representation.
unsigned getABITypeAlignment(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
const PPCRegisterInfo * getRegisterInfo() const override
bool hasP8Altivec() const
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize, unsigned LinkageSize, unsigned ParamAreaSize, unsigned &ArgOffset, unsigned &AvailableFPRs, unsigned &AvailableVRs, bool HasQPX)
CalculateStackSlotUsed - Return whether this argument will use its stack slot (instead of being passe...
CCState - This class holds information needed while lowering arguments and return values...
X3 = GET_TLSLD_ADDR X3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
bool hasDirectMove() const
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
PICLevel::Level getPICLevel() const
Returns the PIC level (small or large model)
void setNode(SDNode *N)
set the SDNode
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side...
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, std::vector< SDNode * > *Created) const override
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
void setExceptionPointerRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception address on entry to...
shadow stack gc Shadow Stack GC Lowering
bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT, CCValAssign::LocInfo &LocInfo, ISD::ArgFlagsTy &ArgFlags, CCState &State)
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, unsigned Bytes, int Dist, SelectionDAG &DAG)
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and rounds it to a floating point val...
const PPCInstrInfo * getInstrInfo() const override
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS model, produces an ADD instruction that ...
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified addressed, force it to be represented as an indexed [r+...
Type * getType() const
All values are typed, get the type of this value.
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
BRCOND - Conditional branch.
static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize)
CalculateStackSlotSize - Calculates the size reserved for this argument on the stack.
const SDValue & getChain() const
Byte Swap and Counting operators.
CHAIN = SC CHAIN, Imm128 - System call.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
This is an abstract virtual class for memory operations.
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName() - This method returns the name of a target specific DAG node. ...
BasicBlock * GetInsertBlock() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always beneficiates from combining into FMA for a given value type...
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
X3 = GET_TLS_ADDR X3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
static SDNode * isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG)
isCallCompatibleAddress - Return the immediate to use if the specified 32-bit value is representable ...
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset...
MachineFrameInfo * getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
Represents one node in the SelectionDAG.
static Constant * get(Type *Ty, double V)
get() - This returns a ConstantFP, or a vector containing a splat of a ConstantFP, for the specified value in the specified type.
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr *MI, MachineBasicBlock *MBB) const
static MachinePointerInfo getStack(int64_t Offset)
getStack - stack pointer relative access.
VPERM - The PPC VPERM Instruction.
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
R Default(const T &Value) const
static mvt_range integer_valuetypes()
i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after execu...
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
G8RC = ADDIS_GOT_TPREL_HA X2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
VectorType - Class to represent vector types.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Target - Wrapper for Target specific information.
Class for arbitrary precision integers.
void setExceptionSelectorRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception typeid on entry to ...
QVGPCI = This corresponds to the QPX qvgpci instruction.
virtual const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getCallPreservedMask - Return a mask of call-preserved registers for the given calling convention on ...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
op_iterator op_begin() const
bool isIntegerTy() const
isIntegerTy - True if this is an instance of IntegerType.
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
static use_iterator use_end()
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two > 0 (64 bit edition...
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
ANY_EXTEND - Used for integer types. The high bits are undefined.
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegReg - Given the specified addressed, check to see if it can be represented as an inde...
The combination of sra[wd]i and addze used to implemented signed integer division by a power of 2...
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
bool enableMachineScheduler() const override
static SDValue getTOCEntry(SelectionDAG &DAG, SDLoc dl, bool Is64Bit, SDValue GA)
GPRC = address of GLOBAL_OFFSET_TABLE.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
block_iterator block_end() const
BR_JT - Jumptable branch.
Representation of each machine instruction.
static MachinePointerInfo getGOT()
getGOT - Return a MachinePointerInfo record that refers to a GOT entry.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
uint64_t MinAlign(uint64_t A, uint64_t B)
MinAlign - A and B are either alignments or offsets.
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack...
Represents a use of a SDNode.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
SmallVector< SDValue, 32 > OutVals
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry...
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
Bitwise operators - logical and, logical or, logical xor.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
Reciprocal estimate instructions (unary FP ops).
bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
ImmutableCallSite - establish a view to a call site for examination.
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
void clearBit(unsigned bitPosition)
Set a given bit to 0.
SDValue getIndexedStore(SDValue OrigStore, SDLoc dl, SDValue Base, SDValue Offset, ISD::MemIndexedMode AM)
static MachineOperand CreateImm(int64_t Val)
ArrayRef< SDUse > ops() const
int32_t SignExtend32(uint32_t x)
SignExtend32 - Sign extend B-bit number x to 32-bit int.
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
Direct move from a GPR to a VSX register (zero)
Fast - This calling convention attempts to make calls as fast as possible (e.g.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
The CMPB instruction (takes two operands of i32 or i64).
bool isIndexed() const
Return true if this is a pre/post inc/dec load/store.
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OpSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
iterator find(const KeyT &Val)
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction. ...
op_iterator op_end() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const SDValue & getOffset() const
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
static void setUsesTOCBasePtr(MachineFunction &MF)
MachineSDNode * getMachineNode(unsigned Opcode, SDLoc dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
TC_RETURN - A tail call return.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
FSINCOS - Compute both fsin and fcos as a single operation.
bool isInt< 16 >(int64_t x)
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
bool isAtLeastRelease(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as release (i.e.
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
static bool isConstantOrUndef(int Op, int Val)
isConstantOrUndef - Op is either an undef node or a ConstantSDNode.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, const AllocaInst *Alloca=nullptr)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
static bool isFunctionGlobalAddress(SDValue Callee)
bool useCRBits() const
useCRBits - Return true if we should store and manipulate i1 values in the individual condition regis...
EVT getValueType() const
Return the ValueType of the referenced return value.
bool hasLocalLinkage() const
SDValue getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isTarget=false, bool isOpaque=false)
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
unsigned getReg() const
getReg - Returns the register number.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
unsigned GetInstSizeInBytes(const MachineInstr *MI) const
GetInstSize - Return the number of bytes of code the specified instruction may be.
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
unsigned getAlignment() const
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
getPrimitiveSizeInBits - Return the basic size of this type if it is a primitive type.
SDValue getZExtOrTrunc(SDValue Op, SDLoc DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, bool Aligned) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
static bool GetLabelAccessInfo(const TargetMachine &TM, const PPCSubtarget &Subtarget, unsigned &HiOpFlags, unsigned &LoOpFlags, const GlobalValue *GV=nullptr)
GetLabelAccessInfo - Return true if we should reference labels using a PICBase, set the HiOpFlags and...
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
void setUseUnderscoreSetJmp(bool Val)
Indicate whether this target prefers to use _setjmp to implement llvm.setjmp or the version without _...
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2...
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
void setInsertFencesForAtomic(bool fence)
Set if the DAG builder should automatically insert fences and reduce the order of atomic memory opera...
SDValue getValueType(EVT)
Disable implicit floating point insts.
PREFETCH - This corresponds to a prefetch intrinsic.
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
bool isUInt< 16 >(uint64_t x)
These nodes represent the 32-bit PPC shifts that operate on 6-bit shift amounts.
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits...
virtual void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL, unsigned DstReg, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg) const
Insert a select instruction into MBB before I that will copy TrueReg to DstReg when Cond is true...
unsigned getDarwinDirective() const
getDarwinDirective - Returns the -m directive specified for the cpu.
BasicBlockListType::iterator iterator
uint64_t getSize() const
getSize - Return the size in bytes of the memory reference.
const TargetLowering & getTargetLoweringInfo() const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
Primary interface to the complete machine description for the target machine.
block_iterator block_begin() const
C - The default llvm calling convention, compatible with C.
StringRef - Represent a constant reference to a string, i.e.
APInt LLVM_ATTRIBUTE_UNUSED_RESULT zext(unsigned width) const
Zero extend to a new width.
SetCC operator - This evaluates to a true value iff the condition is true.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
unsigned getRefinementSteps(const StringRef &Key) const
Return the number of iterations necessary to refine the result of a machine instruction for the g...
SDValue getConstantFP(double Val, SDLoc DL, EVT VT, bool isTarget=false)
SDValue getSetCC(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
unsigned getLocMemOffset() const
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
TRUNCATE - Completely drop the high bits.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
unsigned getLinkageSize() const
getLinkageSize - Return the size of the PowerPC ABI linkage area.
unsigned getAlignment() const
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
addReg - Add a new virtual register operand...
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
unsigned AllocateStack(unsigned Size, unsigned Align)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
void addSuccessor(MachineBasicBlock *succ, uint32_t weight=0)
addSuccessor - Add succ as a successor of this MachineBasicBlock.
RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the altivec VCMP*o instructions.
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
EVT changeVectorElementTypeToInteger() const
changeVectorElementTypeToInteger - Return a vector with the same number of elements as this vector...
MO_PIC_FLAG - If this bit is set, the symbol reference is relative to the function's picbase...
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VMRGH* instruction with the ...
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
SDValue getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget=false)
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
uint64_t getZExtValue() const
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
int isQVALIGNIShuffleMask(SDNode *N)
If this is a qvaligni shuffle mask, return the shift amount, otherwise return -1. ...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...