58 #define DEBUG_TYPE "arm-isel"
60 STATISTIC(NumTailCalls,
"Number of tail calls");
61 STATISTIC(NumMovwMovt,
"Number of GAs materialized with movw + movt");
62 STATISTIC(NumLoopByVals,
"Number of loops generated for byval arguments");
64 "Number of constants with their storage promoted into constant pools");
68 cl::desc(
"Enable / disable ARM interworking (for debugging only)"),
73 cl::desc(
"Enable / disable promotion of unnamed_addr constants into "
78 cl::desc(
"Maximum size of constant to promote into a constant pool"),
82 cl::desc(
"Maximum size of ALL constants to promote into a constant pool"),
86 class ARMCCState :
public CCState {
91 :
CCState(CC, isVarArg, MF, locs, C) {
93 "ARMCCState users must specify whether their context is call"
94 "or prologue generation.");
102 ARM::R0, ARM::R1,
ARM::R2, ARM::R3
105 void ARMTargetLowering::addTypeForNEON(
MVT VT,
MVT PromotedLdStVT,
106 MVT PromotedBitwiseVT) {
107 if (VT != PromotedLdStVT) {
146 if (VT.
isInteger() && VT != PromotedBitwiseVT) {
169 void ARMTargetLowering::addDRTypeForNEON(
MVT VT) {
174 void ARMTargetLowering::addQRTypeForNEON(
MVT VT) {
208 static const struct {
210 const char *
const Name;
268 for (
const auto &LC : LibraryCalls) {
292 static const struct {
294 const char *
const Name;
381 for (
const auto &LC : LibraryCalls) {
391 static const struct {
393 const char *
const Name;
396 } MemOpsLibraryCalls[] = {
404 for (
const auto &LC : MemOpsLibraryCalls) {
414 static const struct {
416 const char *
const Name;
429 for (
const auto &LC : LibraryCalls) {
461 static const struct {
463 const char *
const Name;
471 for (
const auto &LC : LibraryCalls) {
848 HasStandaloneRem =
false;
853 const char *
const Name;
867 for (
const auto &LC : LibraryCalls) {
874 const char *
const Name;
888 for (
const auto &LC : LibraryCalls) {
929 InsertFencesForAtomic =
false;
942 InsertFencesForAtomic =
true;
949 InsertFencesForAtomic =
true;
969 if (!InsertFencesForAtomic) {
1163 std::pair<const TargetRegisterClass *, uint8_t>
1176 RRC = &ARM::DPRRegClass;
1186 RRC = &ARM::DPRRegClass;
1190 RRC = &ARM::DPRRegClass;
1194 RRC = &ARM::DPRRegClass;
1198 return std::make_pair(RRC, Cost);
1355 return &ARM::QQPRRegClass;
1357 return &ARM::QQQQPRRegClass;
1366 unsigned &PrefAlign)
const {
1367 if (!isa<MemIntrinsic>(CI))
1388 for (
unsigned i = 0;
i != NumVals; ++
i) {
1467 #include "ARMGenCallingConv.inc"
1474 bool isVarArg)
const {
1510 bool isVarArg)
const {
1511 return CCAssignFnForNode(CC,
false, isVarArg);
1515 bool isVarArg)
const {
1516 return CCAssignFnForNode(CC,
true, isVarArg);
1523 bool isVarArg)
const {
1524 switch (getEffectiveCallingConv(CC, isVarArg)) {
1528 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1530 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1532 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1534 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1536 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1538 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1544 SDValue ARMTargetLowering::LowerCallResult(
1557 for (
unsigned i = 0;
i != RVLocs.
size(); ++
i) {
1562 if (
i == 0 && isThisReturn) {
1564 "unexpected return calling convention register assignment");
1636 Chain, dl, Arg, PtrOff,
1642 RegsToPassVector &RegsToPass,
1650 unsigned id = Subtarget->
isLittle() ? 0 : 1;
1686 bool isStructRet = (Outs.
empty()) ?
false : Outs[0].Flags.
isSRet();
1687 bool isThisReturn =
false;
1688 bool isSibCall =
false;
1697 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1699 Outs, OutVals,
Ins, DAG);
1702 "site marked musttail");
1718 unsigned NumBytes = CCInfo.getNextStackOffset();
1733 RegsToPassVector RegsToPass;
1738 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
1740 ++
i, ++realArgIdx) {
1742 SDValue Arg = OutVals[realArgIdx];
1744 bool isByVal = Flags.
isByVal();
1772 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1773 VA, ArgLocs[++
i], StackPtr, MemOpChains, Flags);
1777 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1778 VA, ArgLocs[++
i], StackPtr, MemOpChains, Flags);
1782 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1783 dl, DAG, VA, Flags));
1786 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++
i],
1787 StackPtr, MemOpChains, Flags);
1793 "unexpected calling convention register assignment");
1795 "unexpected use of 'returned'");
1796 isThisReturn =
true;
1798 RegsToPass.push_back(std::make_pair(VA.
getLocReg(), Arg));
1799 }
else if (isByVal) {
1801 unsigned offset = 0;
1805 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1806 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1808 if (CurByValIdx < ByValArgsCount) {
1810 unsigned RegBegin, RegEnd;
1811 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1816 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1823 RegsToPass.push_back(std::make_pair(j, Load));
1828 offset = RegEnd - RegBegin;
1830 CCInfo.nextInRegsParam();
1846 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1850 }
else if (!isSibCall) {
1853 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1854 dl, DAG, VA, Flags));
1858 if (!MemOpChains.
empty())
1867 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++
i) {
1868 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
1869 RegsToPass[i].second, InFlag);
1884 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++
i) {
1885 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
1886 RegsToPass[i].second, InFlag);
1895 bool isDirect =
false;
1901 GV =
G->getGlobal();
1905 bool isARMFunc = !Subtarget->
isThumb() || (isStub && !Subtarget->
isMClass());
1906 bool isLocalARMFunc =
false;
1912 "long-calls codegen is not position independent!");
1916 if (isa<GlobalAddressSDNode>(Callee)) {
1929 const char *Sym = S->getSymbol();
1935 ARMPCLabelIndex, 0);
1943 }
else if (isa<GlobalAddressSDNode>(Callee)) {
1947 auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
1949 bool PreferIndirect =
1952 return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
1955 if (!PreferIndirect) {
1974 "Windows is the only supported COFF target");
1992 const char *Sym = S->getSymbol();
1997 ARMPCLabelIndex, 4);
2013 if ((!isDirect || isARMFunc) && !Subtarget->
hasV5TOps())
2018 if (!isDirect && !Subtarget->
hasV5TOps())
2029 std::vector<SDValue> Ops;
2030 Ops.push_back(Chain);
2031 Ops.push_back(Callee);
2035 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++
i)
2036 Ops.push_back(DAG.
getRegister(RegsToPass[i].first,
2037 RegsToPass[i].second.getValueType()));
2050 isThisReturn =
false;
2056 assert(Mask &&
"Missing call preserved mask for calling convention");
2061 Ops.push_back(InFlag);
2070 Chain = DAG.
getNode(CallOpc, dl, NodeTys, Ops);
2080 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2081 InVals, isThisReturn,
2082 isThisReturn ? OutVals[0] :
SDValue());
2089 void ARMTargetLowering::HandleByVal(
CCState *State,
unsigned &Size,
2090 unsigned Align)
const {
2093 "unhandled ParmContext");
2096 Align = std::max(Align, 4U);
2102 unsigned AlignInRegs = Align / 4;
2103 unsigned Waste = (
ARM::R4 -
Reg) % AlignInRegs;
2104 for (
unsigned i = 0; i < Waste; ++
i)
2117 if (NSAAOffset != 0 && Size > Excess) {
2129 unsigned ByValRegBegin =
Reg;
2130 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4,
ARM::R4);
2134 for (
unsigned i = Reg + 1; i != ByValRegEnd; ++
i)
2140 Size = std::max<int>(Size - Excess, 0);
2165 }
else if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2191 ARMTargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee,
2194 bool isCalleeStructRet,
2195 bool isCallerStructRet,
2217 if (isCalleeStructRet || isCallerStructRet)
2244 if (CalleeCC != CallerCC) {
2246 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2259 if (!Outs.
empty()) {
2263 ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C,
Call);
2265 if (CCInfo.getNextStackOffset()) {
2271 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
2273 ++
i, ++realArgIdx) {
2276 SDValue Arg = OutVals[realArgIdx];
2287 if (!ArgLocs[++i].isRegLoc())
2290 if (!ArgLocs[++i].isRegLoc())
2292 if (!ArgLocs[++i].isRegLoc())
2317 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2339 if (IntKind ==
"" || IntKind ==
"IRQ" || IntKind ==
"FIQ" ||
2342 else if (IntKind ==
"SWI" || IntKind ==
"UNDEF")
2346 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2381 for (
unsigned i = 0, realRVLocIdx = 0;
2383 ++
i, ++realRVLocIdx) {
2387 SDValue Arg = OutVals[realRVLocIdx];
2406 HalfGPRs.
getValue(isLittleEndian ? 0 : 1),
2412 HalfGPRs.
getValue(isLittleEndian ? 1 : 0),
2427 fmrrd.
getValue(isLittleEndian ? 0 : 1),
2433 fmrrd.
getValue(isLittleEndian ? 1 : 0),
2450 else if (ARM::DPRRegClass.
contains(*I))
2478 bool ARMTargetLowering::isUsedByReturnOnly(
SDNode *
N,
SDValue &Chain)
const {
2502 if (Copies.
size() > 2)
2537 bool HasRet =
false;
2553 bool ARMTargetLowering::mayBeEmittedAsTailCall(
CallInst *CI)
const {
2559 if (!CI->
isTailCall() || Attr.getValueAsString() ==
"true")
2573 &&
"LowerWRITE_REGISTER called for non-i64 type argument.");
2612 unsigned ARMPCLabelIndex = 0;
2615 const BlockAddress *BA = cast<BlockAddressSDNode>(
Op)->getBlockAddress();
2618 if (!IsPositionIndependent) {
2621 unsigned PCAdj = Subtarget->
isThumb() ? 4 : 8;
2632 if (!IsPositionIndependent)
2663 ARMTargetLowering::LowerGlobalTLSAddressDarwin(
SDValue Op,
2670 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
2707 ARMTargetLowering::LowerGlobalTLSAddressWindows(
SDValue Op,
2751 const auto *GA = cast<GlobalAddressSDNode>(
Op);
2767 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
2787 Entry.Node = Argument;
2789 Args.push_back(Entry);
2797 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
2798 return CallResult.first;
2820 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
2828 PtrVT, dl, Chain, Offset,
2836 PtrVT, dl, Chain, Offset,
2846 PtrVT, dl, Chain, Offset,
2858 return LowerGlobalTLSAddressDarwin(Op, DAG);
2861 return LowerGlobalTLSAddressWindows(Op, DAG);
2874 return LowerToTLSGeneralDynamicModel(GA, DAG);
2877 return LowerToTLSExecModels(GA, DAG, model);
2886 for (
auto *U : V->
users())
2888 while (!Worklist.
empty()) {
2890 if (isa<ConstantExpr>(U)) {
2891 for (
auto *UU : U->users())
2897 if (!I || I->getParent()->getParent() !=
F)
2907 for (
auto *U : V->
users())
2909 while (!Worklist.
empty()) {
2911 if (isa<ConstantExpr>(U)) {
2912 for (
auto *UU : U->users())
2917 if (!isa<Instruction>(U))
2927 Type *SubT =
nullptr;
2959 if (!GVar || !GVar->hasInitializer() ||
2960 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
2961 !GVar->hasLocalLinkage())
2967 auto *
Init = GVar->getInitializer();
2979 unsigned Align = GVar->getAlignment();
2980 unsigned RequiredPadding = 4 - (Size % 4);
2981 bool PaddingPossible =
2982 RequiredPadding == 4 || (CDAInit && CDAInit->isString());
2986 unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3008 if (RequiredPadding != 4) {
3013 while (RequiredPadding--)
3026 ++NumConstpoolPromoted;
3031 if (
const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3033 return (isa<GlobalVariable>(GV) && cast<GlobalVariable>(GV)->isConstant()) ||
3041 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3058 unsigned PCAdj = Subtarget->
isThumb() ? 4 : 8;
3060 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
3073 DAG.
getLoad(PtrVT, dl, Chain, Result,
3076 }
else if (Subtarget->
isROPI() && IsRO) {
3081 }
else if (Subtarget->
isRWPI() && !IsRO) {
3115 "ROPI/RWPI not currently supported for Darwin");
3118 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3141 "Windows on ARM expects to use movw/movt");
3143 "ROPI/RWPI not currently supported for Windows");
3145 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3159 if (GV->hasDLLImportStorageClass())
3181 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(
SDValue Op,
3191 unsigned IntNo = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
3195 case Intrinsic::thread_pointer: {
3199 case Intrinsic::eh_sjlj_lsda: {
3206 unsigned PCAdj = IsPositionIndependent ? (Subtarget->
isThumb() ? 4 : 8) : 0;
3216 if (IsPositionIndependent) {
3222 case Intrinsic::arm_neon_vmulls:
3223 case Intrinsic::arm_neon_vmullu: {
3224 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3229 case Intrinsic::arm_neon_vminnm:
3230 case Intrinsic::arm_neon_vmaxnm: {
3231 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3236 case Intrinsic::arm_neon_vminu:
3237 case Intrinsic::arm_neon_vmaxu: {
3240 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3245 case Intrinsic::arm_neon_vmins:
3246 case Intrinsic::arm_neon_vmaxs: {
3249 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3254 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3271 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3304 unsigned isRead = ~cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue() & 1;
3310 unsigned isData = cast<ConstantSDNode>(Op.
getOperand(4))->getZExtValue();
3313 isRead = ~isRead & 1;
3314 isData = ~isData & 1;
3340 const SDLoc &dl)
const {
3346 RC = &ARM::tGPRRegClass;
3348 RC = &ARM::GPRRegClass;
3383 const Value *OrigArg,
3384 unsigned InRegsParamRecordIdx,
3385 int ArgOffset,
unsigned ArgSize)
const {
3400 unsigned RBegin, REnd;
3410 ArgOffset = -4 * (
ARM::R4 - RBegin);
3420 for (
unsigned Reg = RBegin, i = 0; Reg < REnd; ++
Reg, ++
i) {
3429 if (!MemOps.
empty())
3438 unsigned TotalArgRegsSaveSize,
3439 bool ForceMutable)
const {
3448 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain,
nullptr,
3454 SDValue ARMTargetLowering::LowerFormalArguments(
3472 unsigned CurArgIdx = 0;
3484 unsigned ArgRegBegin =
ARM::R4;
3485 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++
i) {
3496 unsigned RBegin, REnd;
3498 ArgRegBegin =
std::min(ArgRegBegin, RBegin);
3504 int lastInsIndex = -1;
3511 unsigned TotalArgRegsSaveSize = 4 * (
ARM::R4 - ArgRegBegin);
3515 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++
i) {
3517 if (Ins[VA.
getValNo()].isOrigArg()) {
3519 Ins[VA.
getValNo()].getOrigArgIndex() - CurArgIdx);
3520 CurArgIdx = Ins[VA.
getValNo()].getOrigArgIndex();
3530 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3541 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3546 ArgValue, ArgValue1,
3549 ArgValue, ArgValue2,
3552 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3558 RC = &ARM::SPRRegClass;
3560 RC = &ARM::DPRRegClass;
3562 RC = &ARM::QPRRegClass;
3565 : &ARM::GPRRegClass;
3607 if (index != lastInsIndex)
3616 assert(Ins[index].isOrigArg() &&
3617 "Byval arguments cannot be implicit");
3620 int FrameIndex = StoreByValRegs(
3621 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3636 lastInsIndex = index;
3643 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3645 TotalArgRegsSaveSize);
3655 return CFP->getValueAPF().isPosZero();
3661 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
CP->getConstVal()))
3662 return CFP->getValueAPF().isPosZero();
3680 const SDLoc &dl)
const {
3682 unsigned C = RHSC->getZExtValue();
3768 std::pair<SDValue, SDValue>
3808 return std::make_pair(Value, OverflowCmp);
3820 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
3829 ARMcc, CCR, OverflowCmp);
3851 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
3855 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
3870 if (CMOVTrue && CMOVFalse) {
3872 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
3876 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
3878 False = SelectFalse;
3879 }
else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
3890 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
3906 bool &swpCmpOps,
bool &swpVselOps) {
3934 swpCmpOps = !swpCmpOps;
3935 swpVselOps = !swpVselOps;
3971 ARMcc, CCR, duplicateCmp(Cmp, DAG));
3998 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4000 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4008 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4010 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4035 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4047 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4050 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4053 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4054 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4055 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4056 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4068 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4084 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4090 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4091 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4092 int64_t PosVal = std::max(Val1, Val2);
4094 if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4095 (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4099 K = (uint64_t)PosVal;
4113 uint64_t SatConstant;
4160 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4161 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4170 bool swpCmpOps =
false;
4171 bool swpVselOps =
false;
4184 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
4186 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4190 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
4191 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
4223 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4225 Ld->getPointerInfo(), Ld->getAlignment(),
4226 Ld->getMemOperand()->getFlags());
4241 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
4245 Ld->getAlignment(), Ld->getMemOperand()->getFlags());
4248 unsigned NewAlign =
MinAlign(Ld->getAlignment(), 4);
4252 Ld->getPointerInfo().getWithOffset(4), NewAlign,
4253 Ld->getMemOperand()->getFlags());
4271 bool LHSSeenZero =
false;
4273 bool RHSSeenZero =
false;
4275 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
4291 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4294 Chain, Dest, ARMcc, CCR, Cmp);
4306 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
4335 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4338 Chain, Dest, ARMcc, CCR, Cmp);
4346 if (
SDValue Result = OptimizeVFPBrcond(Op, DAG))
4354 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
4357 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
4396 DAG.
getLoad(PTy, dl, Chain, Addr,
4414 "Invalid type for custom lowering!");
4435 false,
SDLoc(Op)).first;
4452 "Invalid type for custom lowering!");
4471 return DAG.
getNode(Opc, dl, VT, Op);
4487 false,
SDLoc(Op)).first;
4502 bool UseNEON = !InGPR && Subtarget->
hasNEON();
4585 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4587 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4608 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4619 unsigned ARMTargetLowering::getRegisterByName(
const char* RegName,
EVT VT,
4622 .Case(
"sp", ARM::SP)
4638 &&
"ExpandREAD_REGISTER called for non-i64 type result.");
4683 NewIndex *= APIntIndex;
4685 if (NewIndex.getBitWidth() > 32)
4715 "ExpandBITCAST called for non-i64 type");
4789 SDValue LoBigShift = DAG.
getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4798 ? DAG.getNode(Opc, dl, VT, ShOpHi,
4799 DAG.getConstant(VTBits - 1, dl, VT))
4800 : DAG.getConstant(0, dl, VT);
4801 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4807 return DAG.getMergeValues(Ops, dl);
4816 unsigned VTBits = VT.getSizeInBits();
4839 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4843 DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
4846 return DAG.getMergeValues(Ops, dl);
4889 if ((ElemTy ==
MVT::i16 || ElemTy == MVT::i32) &&
4936 if (ElemTy == MVT::i32)
5050 assert(ST->
hasNEON() &&
"Custom ctpop lowering requires NEON.");
5053 "Unexpected type for custom ctpop lowering");
5090 Intrinsic::arm_neon_vshifts :
5091 Intrinsic::arm_neon_vshiftu);
5107 "Unknown shift to lower!");
5136 bool Invert =
false;
5145 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->
get();
5162 Merged = DAG.
getNOT(dl, Merged, CmpVT);
5172 switch (SetCCOpcode) {
5213 switch (SetCCOpcode) {
5279 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
5282 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
5288 Result = DAG.
getNOT(dl, Result, VT);
5309 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->
get()), DL, MVT::i32);
5322 const SDLoc &dl,
EVT &VT,
bool is128Bits,
5324 unsigned OpCmode, Imm;
5334 switch (SplatBitSize) {
5339 assert((SplatBits & ~0xff) == 0 &&
"one byte splat value is too big");
5348 if ((SplatBits & ~0xff) == 0) {
5354 if ((SplatBits & ~0xff00) == 0) {
5357 Imm = SplatBits >> 8;
5368 if ((SplatBits & ~0xff) == 0) {
5374 if ((SplatBits & ~0xff00) == 0) {
5377 Imm = SplatBits >> 8;
5380 if ((SplatBits & ~0xff0000) == 0) {
5383 Imm = SplatBits >> 16;
5386 if ((SplatBits & ~0xff000000) == 0) {
5389 Imm = SplatBits >> 24;
5396 if ((SplatBits & ~0xffff) == 0 &&
5397 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
5400 Imm = SplatBits >> 8;
5404 if ((SplatBits & ~0xffffff) == 0 &&
5405 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
5408 Imm = SplatBits >> 16;
5423 uint64_t BitMask = 0xff;
5427 for (
int ByteNum = 0; ByteNum < 8; ++ByteNum) {
5428 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
5431 }
else if ((SplatBits & BitMask) != 0) {
5440 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
5465 APInt INTVal = FPVal.bitcastToAPInt();
5512 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
5517 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
5571 unsigned ExpectedElt = Imm;
5572 for (
unsigned i = 1; i < NumElts; ++
i) {
5576 if (ExpectedElt == NumElts)
5579 if (M[i] < 0)
continue;
5580 if (ExpectedElt != static_cast<unsigned>(M[i]))
5589 bool &ReverseVEXT,
unsigned &Imm) {
5591 ReverseVEXT =
false;
5602 unsigned ExpectedElt = Imm;
5603 for (
unsigned i = 1; i < NumElts; ++
i) {
5607 if (ExpectedElt == NumElts * 2) {
5612 if (M[i] < 0)
continue;
5613 if (ExpectedElt != static_cast<unsigned>(M[i]))
5628 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
5629 "Only possible block sizes for VREV are: 16, 32, 64");
5636 unsigned BlockElts = M[0] + 1;
5639 BlockElts = BlockSize / EltSz;
5641 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
5644 for (
unsigned i = 0; i < NumElts; ++
i) {
5645 if (M[i] < 0)
continue;
5646 if ((
unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
5687 if (M.
size() != NumElts && M.
size() != NumElts*2)
5695 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
5696 if (M.
size() == NumElts * 2)
5697 WhichResult = i / NumElts;
5699 WhichResult = M[
i] == 0 ? 0 : 1;
5700 for (
unsigned j = 0; j < NumElts; j += 2) {
5701 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
5702 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + NumElts + WhichResult))
5707 if (M.
size() == NumElts*2)
5722 if (M.
size() != NumElts && M.
size() != NumElts*2)
5725 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
5726 if (M.
size() == NumElts * 2)
5727 WhichResult = i / NumElts;
5729 WhichResult = M[
i] == 0 ? 0 : 1;
5730 for (
unsigned j = 0; j < NumElts; j += 2) {
5731 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != j + WhichResult) ||
5732 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != j + WhichResult))
5737 if (M.
size() == NumElts*2)
5757 if (M.
size() != NumElts && M.
size() != NumElts*2)
5760 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
5761 WhichResult = M[
i] == 0 ? 0 : 1;
5762 for (
unsigned j = 0; j < NumElts; ++j) {
5763 if (M[i+j] >= 0 && (
unsigned) M[i+j] != 2 * j + WhichResult)
5768 if (M.
size() == NumElts*2)
5787 if (M.
size() != NumElts && M.
size() != NumElts*2)
5790 unsigned Half = NumElts / 2;
5791 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
5792 WhichResult = M[
i] == 0 ? 0 : 1;
5793 for (
unsigned j = 0; j < NumElts; j += Half) {
5794 unsigned Idx = WhichResult;
5795 for (
unsigned k = 0; k < Half; ++k) {
5796 int MIdx = M[i + j + k];
5797 if (MIdx >= 0 && (
unsigned) MIdx != Idx)
5804 if (M.
size() == NumElts*2)
5828 if (M.
size() != NumElts && M.
size() != NumElts*2)
5831 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
5832 WhichResult = M[
i] == 0 ? 0 : 1;
5833 unsigned Idx = WhichResult * NumElts / 2;
5834 for (
unsigned j = 0; j < NumElts; j += 2) {
5835 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != Idx) ||
5836 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != Idx + NumElts))
5842 if (M.
size() == NumElts*2)
5861 if (M.
size() != NumElts && M.
size() != NumElts*2)
5864 for (
unsigned i = 0; i < M.
size(); i += NumElts) {
5865 WhichResult = M[
i] == 0 ? 0 : 1;
5866 unsigned Idx = WhichResult * NumElts / 2;
5867 for (
unsigned j = 0; j < NumElts; j += 2) {
5868 if ((M[i+j] >= 0 && (
unsigned) M[i+j] != Idx) ||
5869 (M[i+j+1] >= 0 && (
unsigned) M[i+j+1] != Idx))
5875 if (M.
size() == NumElts*2)
5888 unsigned &WhichResult,
5891 if (
isVTRNMask(ShuffleMask, VT, WhichResult))
5893 if (
isVUZPMask(ShuffleMask, VT, WhichResult))
5895 if (
isVZIPMask(ShuffleMask, VT, WhichResult))
5913 if (NumElts != M.
size())
5917 for (
unsigned i = 0; i != NumElts; ++
i)
5918 if (M[i] >= 0 && M[i] != (
int) (NumElts - 1 -
i))
5930 if (!isa<ConstantSDNode>(N))
5932 Val = cast<ConstantSDNode>(
N)->getZExtValue();
5935 if (Val <= 255 || ~Val <= 255)
5952 APInt SplatBits, SplatUndef;
5953 unsigned SplatBitSize;
5955 if (BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
5959 if (SplatBitSize <= 64) {
5972 uint64_t NegatedImm = (~SplatBits).getZExtValue();
5986 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
6001 bool isOnlyLowElement =
true;
6002 bool usesOnlyOneValue =
true;
6003 bool hasDominantValue =
false;
6004 bool isConstant =
true;
6010 for (
unsigned i = 0; i < NumElts; ++
i) {
6015 isOnlyLowElement =
false;
6016 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6019 ValueCounts.
insert(std::make_pair(V, 0));
6020 unsigned &Count = ValueCounts[V];
6023 if (++Count > (NumElts / 2)) {
6024 hasDominantValue =
true;
6028 if (ValueCounts.
size() != 1)
6029 usesOnlyOneValue =
false;
6031 Value = ValueCounts.
begin()->first;
6033 if (ValueCounts.
size() == 0)
6034 return DAG.getUNDEF(VT);
6045 if (hasDominantValue && EltSize <= 32) {
6066 Value, DAG.getConstant(index, dl, MVT::i32)),
6067 DAG.getConstant(index, dl, MVT::i32));
6074 if (!usesOnlyOneValue) {
6077 for (
unsigned I = 0; I < NumElts; ++
I) {
6083 Ops.
push_back(DAG.getConstant(I, dl, MVT::i32));
6091 for (
unsigned i = 0; i < NumElts; ++
i)
6095 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
6096 Val = LowerBUILD_VECTOR(Val, DAG, ST);
6100 if (usesOnlyOneValue) {
6102 if (isConstant && Val.
getNode())
6115 SDValue shuffle = ReconstructShuffle(Op, DAG);
6127 DAG.getBuildVector(HVT, dl,
makeArrayRef(&Ops[0], NumElts / 2));
6129 Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
6131 HVT, dl,
makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
6133 Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
6141 if (EltSize >= 32) {
6147 for (
unsigned i = 0; i < NumElts; ++
i)
6159 if (!isConstant && !usesOnlyOneValue) {
6160 SDValue Vec = DAG.getUNDEF(VT);
6161 for (
unsigned i = 0 ; i < NumElts; ++
i) {
6165 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
6183 struct ShuffleSourceInfo {
6199 ShuffleSourceInfo(
SDValue Vec)
6200 : Vec(Vec), MinElt(UINT_MAX), MaxElt(0), ShuffleVec(Vec), WindowBase(0),
6207 for (
unsigned i = 0; i < NumElts; ++
i) {
6215 }
else if (!isa<ConstantSDNode>(V.
getOperand(1))) {
6228 unsigned EltNo = cast<ConstantSDNode>(V.
getOperand(1))->getZExtValue();
6235 if (Sources.
size() > 2)
6241 for (
auto &
Source : Sources) {
6242 EVT SrcEltTy =
Source.Vec.getValueType().getVectorElementType();
6243 if (SrcEltTy.
bitsLT(SmallestEltTy))
6244 SmallestEltTy = SrcEltTy;
6246 unsigned ResMultiplier =
6254 for (
auto &Src : Sources) {
6280 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
6285 if (Src.MinElt >= NumSrcElts) {
6290 Src.WindowBase = -NumSrcElts;
6291 }
else if (Src.MaxElt < NumSrcElts) {
6308 Src.WindowBase = -Src.MinElt;
6315 for (
auto &Src : Sources) {
6317 if (SrcEltTy == SmallestEltTy)
6322 Src.WindowBase *= Src.WindowScale;
6327 for (
auto Src : Sources)
6340 int EltNo = cast<ConstantSDNode>(Entry.
getOperand(1))->getSExtValue();
6348 int LanesDefined = BitsDefined / BitsPerShuffleLane;
6352 int *LaneMask = &Mask[i * ResMultiplier];
6354 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
6355 ExtractBase += NumElts * (Src - Sources.begin());
6356 for (
int j = 0; j < LanesDefined; ++j)
6357 LaneMask[j] = ExtractBase + j;
6366 assert(Sources.size() <= 2 &&
"Too many sources!");
6369 for (
unsigned i = 0; i < Sources.size(); ++
i)
6370 ShuffleOps[i] = Sources[i].ShuffleVec;
6373 ShuffleOps[1], Mask);
6386 unsigned PFIndexes[4];
6387 for (
unsigned i = 0; i != 4; ++
i) {
6391 PFIndexes[
i] = M[
i];
6395 unsigned PFTableIndex =
6396 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6398 unsigned Cost = (PFEntry >> 30);
6404 bool ReverseVEXT, isV_UNDEF;
6405 unsigned Imm, WhichResult;
6408 return (EltSize >= 32 ||
6424 unsigned OpNum = (PFEntry >> 26) & 0x0F;
6425 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
6426 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
6446 if (OpNum == OP_COPY) {
6447 if (LHSID == (1*9+2)*9+3)
return LHS;
6448 assert(LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
6475 OpLHS, DAG.
getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
6481 DAG.
getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
6485 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
6489 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
6493 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
6507 I = ShuffleMask.
begin(),
E = ShuffleMask.
end(); I !=
E; ++
I)
6525 "Expect an v8i16/v16i8 type");
6530 unsigned ExtractNum = (VT ==
MVT::v16i8) ? 8 : 4;
6551 if (EltSize <= 32) {
6555 if (Lane == -1) Lane = 0;
6566 bool IsScalarToVector =
true;
6569 IsScalarToVector =
false;
6572 if (IsScalarToVector)
6581 if (
isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
6605 unsigned WhichResult;
6608 ShuffleMask, VT, WhichResult, isV_UNDEF)) {
6612 .getValue(WhichResult);
6638 }) &&
"Unexpected shuffle index into UNDEF operand!");
6641 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
6644 assert((WhichResult == 0) &&
6645 "In-place shuffle of concat can only have one result!");
6658 unsigned PFIndexes[4];
6659 for (
unsigned i = 0; i != 4; ++
i) {
6660 if (ShuffleMask[i] < 0)
6663 PFIndexes[
i] = ShuffleMask[
i];
6667 unsigned PFTableIndex =
6668 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6670 unsigned Cost = (PFEntry >> 30);
6677 if (EltSize >= 32) {
6685 for (
unsigned i = 0; i < NumElts; ++
i) {
6686 if (ShuffleMask[i] < 0)
6690 ShuffleMask[i] < (
int)NumElts ? V1 : V2,
6711 if (!isa<ConstantSDNode>(Lane))
6720 if (!isa<ConstantSDNode>(Lane))
6736 "unexpected CONCAT_VECTORS");
6765 unsigned HiElt = 1 - LoElt;
6770 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
6773 if (Hi0->getSExtValue() == Lo0->
getSExtValue() >> 32 &&
6774 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
6777 if (Hi0->isNullValue() && Hi1->isNullValue())
6790 unsigned HalfSize = EltSize / 2;
6792 if (!
isIntN(HalfSize, C->getSExtValue()))
6795 if (!
isUIntN(HalfSize, C->getZExtValue()))
6833 switch (OrigSimpleTy) {
6849 unsigned ExtOpcode) {
6916 unsigned EltSize = VT.getScalarSizeInBits() / 2;
6917 unsigned NumElts = VT.getVectorNumElements();
6921 for (
unsigned i = 0; i != NumElts; ++
i) {
6958 "unexpected type for custom-lowering ISD::MUL");
6961 unsigned NewOpc = 0;
6965 if (isN0SExt && isN1SExt)
6970 if (isN0ZExt && isN1ZExt)
6972 else if (isN1SExt || isN1ZExt) {
7006 "unexpected types for extended operands to VMULL");
7007 return DAG.
getNode(NewOpc, DL, VT, Op0, Op1);
7021 return DAG.
getNode(N0->getOpcode(), DL, VT,
7042 DAG.
getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7076 DAG.
getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7079 DAG.
getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7101 "unexpected type for custom-lowering ISD::SDIV");
7137 "unexpected type for custom-lowering ISD::UDIV");
7183 DAG.
getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7186 DAG.
getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7190 DAG.
getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7214 bool ExtraOp =
false;
7249 bool ShouldUseSRet = Subtarget->
isAPCS_ABI();
7251 if (ShouldUseSRet) {
7253 const uint64_t ByteSize =
DL.getTypeAllocSize(RetTy);
7254 const unsigned StackAlign =
DL.getPrefTypeAlignment(RetTy);
7255 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign,
false);
7261 Entry.isSExt =
false;
7262 Entry.isZExt =
false;
7263 Entry.isSRet =
true;
7264 Args.push_back(Entry);
7271 Entry.isSExt =
false;
7272 Entry.isZExt =
false;
7273 Args.push_back(Entry);
7275 const char *LibcallName =
7276 (ArgVT ==
MVT::f64) ?
"__sincos_stret" :
"__sincosf_stret";
7285 .setCallee(CC, RetTy, Callee, std::move(Args))
7286 .setDiscardResult(ShouldUseSRet);
7287 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
7290 return CallResult.first;
7311 "unexpected type for custom lowering DIV");
7317 const char *
Name =
nullptr;
7319 Name = (VT ==
MVT::i32) ?
"__rt_sdiv" :
"__rt_sdiv64";
7321 Name = (VT ==
MVT::i32) ?
"__rt_udiv" :
"__rt_udiv64";
7327 for (
auto AI : {1, 0}) {
7331 Args.push_back(Arg);
7334 CallLoweringInfo CLI(DAG);
7338 ES, std::move(Args));
7344 bool Signed)
const {
7346 "unexpected type for custom lowering DIV");
7352 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7368 void ARMTargetLowering::ExpandDIV_Windows(
7375 "unexpected type for custom lowering DIV");
7380 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7409 DAG.
getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
7434 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
7443 "AtomicCmpSwap on types less than 64 should be legal");
7449 ARM::CMP_SWAP_64,
SDLoc(N),
7454 MemOp[0] = cast<MemSDNode>(
N)->getMemOperand();
7455 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
7469 "Custom lowering is MSVCRT specific!");
7484 Args.push_back(Entry);
7486 Entry.
Node = Exponent;
7489 Args.push_back(Entry);
7501 F->getReturnType() == LCRTy;
7510 std::pair<SDValue, SDValue> CI = TLI.
LowerCallTo(CLI);
7513 return !CI.second.getNode() ? DAG.
getRoot() : CI.first;
7529 return LowerGlobalAddressWindows(Op, DAG);
7531 return LowerGlobalAddressELF(Op, DAG);
7533 return LowerGlobalAddressDarwin(Op, DAG);
7579 return LowerDIV_Windows(Op, DAG,
true);
7583 return LowerDIV_Windows(Op, DAG,
false);
7593 return LowerXALUO(Op, DAG);
7601 return LowerDYNAMIC_STACKALLOC(Op, DAG);
7631 Res = LowerREM(N, DAG);
7635 Res = LowerDivRem(
SDValue(N, 0), DAG);
7667 "ROPI/RWPI not currently supported with SjLj");
7677 bool isThumb2 = Subtarget->
isThumb2();
7680 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
7686 : &ARM::GPRRegClass;
7706 .addConstantPoolIndex(CPI)
7715 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg3)
7723 }
else if (isThumb) {
7733 .addConstantPoolIndex(CPI)
7736 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg2)
7750 BuildMI(*MBB, MI, dl, TII->
get(ARM::tADDframe), NewVReg5)
7765 .addConstantPoolIndex(CPI)
7780 void ARMTargetLowering::EmitSjLjDispatchBlock(
MachineInstr &MI,
7790 : &ARM::GPRnopcRegClass;
7795 unsigned MaxCSNum = 0;
7798 if (!BB->isEHPad())
continue;
7803 II = BB->begin(),
IE = BB->end(); II !=
IE; ++II) {
7804 if (!II->isEHLabel())
continue;
7806 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
7811 CSI = CallSiteIdxs.
begin(),
CSE = CallSiteIdxs.
end();
7812 CSI !=
CSE; ++CSI) {
7813 CallSiteNumToLPad[*CSI].push_back(&*BB);
7814 MaxCSNum = std::max(MaxCSNum, *CSI);
7821 std::vector<MachineBasicBlock*> LPadList;
7823 LPadList.reserve(CallSiteNumToLPad.
size());
7824 for (
unsigned I = 1; I <= MaxCSNum; ++
I) {
7827 II = MBBList.
begin(),
IE = MBBList.
end(); II !=
IE; ++II) {
7828 LPadList.push_back(*II);
7829 InvokeBBs.
insert((*II)->pred_begin(), (*II)->pred_end());
7833 assert(!LPadList.empty() &&
7834 "No landing pad destinations for the dispatch jump table!");
7848 unsigned trap_opcode;
7850 trap_opcode = ARM::tTRAP;
7867 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
7874 MIB =
BuildMI(DispatchBB, dl, TII->
get(ARM::Int_eh_sjlj_dispatchsetup));
7886 unsigned NumLPads = LPadList.size();
7894 if (NumLPads < 256) {
7897 .
addImm(LPadList.size()));
7901 .addImm(NumLPads & 0xFFFF));
7903 unsigned VReg2 = VReg1;
7904 if ((NumLPads & 0xFFFF0000) != 0) {
7908 .
addImm(NumLPads >> 16));
7916 BuildMI(DispatchBB, dl, TII->
get(ARM::t2Bcc))
7923 .addJumpTableIndex(MJTI));
7928 BuildMI(DispContBB, dl, TII->
get(ARM::t2ADDrs), NewVReg4)
7933 BuildMI(DispContBB, dl, TII->
get(ARM::t2BR_JT))
7937 }
else if (Subtarget->
isThumb()) {
7944 if (NumLPads < 256) {
7981 .addJumpTableIndex(MJTI));
7998 unsigned NewVReg6 = NewVReg5;
7999 if (IsPositionIndependent) {
8007 BuildMI(DispContBB, dl, TII->
get(ARM::tBR_JTr))
8017 if (NumLPads < 256) {
8024 .addImm(NumLPads & 0xFFFF));
8026 unsigned VReg2 = VReg1;
8027 if ((NumLPads & 0xFFFF0000) != 0) {
8031 .
addImm(NumLPads >> 16));
8070 .addJumpTableIndex(MJTI));
8076 BuildMI(DispContBB, dl, TII->
get(ARM::LDRrs), NewVReg5)
8082 if (IsPositionIndependent) {
8083 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTadd))
8088 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTr))
8096 for (std::vector<MachineBasicBlock*>::iterator
8097 I = LPadList.begin(),
E = LPadList.end(); I !=
E; ++
I) {
8099 if (SeenMBBs.
insert(CurMBB).second)
8112 while (!Successors.empty()) {
8121 BB->normalizeSuccProbs();
8128 II = BB->rbegin(),
IE = BB->rend(); II !=
IE; ++II) {
8129 if (!II->isCall())
continue;
8133 OI = II->operands_begin(), OE = II->operands_end();
8135 if (!OI->isReg())
continue;
8136 DefRegs[OI->getReg()] =
true;
8141 for (
unsigned i = 0; SavedRegs[
i] != 0; ++
i) {
8142 unsigned Reg = SavedRegs[
i];
8144 !ARM::tGPRRegClass.contains(Reg) &&
8145 !ARM::hGPRRegClass.contains(Reg))
8147 if (Subtarget->
isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
8149 if (!Subtarget->
isThumb() && !ARM::GPRRegClass.contains(Reg))
8162 I = MBBLPads.
begin(),
E = MBBLPads.
end(); I !=
E; ++
I)
8163 (*I)->setIsEHPad(
false);
8180 static unsigned getLdOpcode(
unsigned LdSize,
bool IsThumb1,
bool IsThumb2) {
8182 return LdSize == 16 ? ARM::VLD1q32wb_fixed
8183 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
8185 return LdSize == 4 ? ARM::tLDRi
8186 : LdSize == 2 ? ARM::tLDRHi
8187 : LdSize == 1 ? ARM::tLDRBi : 0;
8189 return LdSize == 4 ? ARM::t2LDR_POST
8190 : LdSize == 2 ? ARM::t2LDRH_POST
8191 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
8192 return LdSize == 4 ? ARM::LDR_POST_IMM
8193 : LdSize == 2 ? ARM::LDRH_POST
8194 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
8199 static unsigned getStOpcode(
unsigned StSize,
bool IsThumb1,
bool IsThumb2) {
8201 return StSize == 16 ? ARM::VST1q32wb_fixed
8202 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
8204 return StSize == 4 ? ARM::tSTRi
8205 : StSize == 2 ? ARM::tSTRHi
8206 : StSize == 1 ? ARM::tSTRBi : 0;
8208 return StSize == 4 ? ARM::t2STR_POST
8209 : StSize == 2 ? ARM::t2STRH_POST
8210 : StSize == 1 ? ARM::t2STRB_POST : 0;
8211 return StSize == 4 ? ARM::STR_POST_IMM
8212 : StSize == 2 ? ARM::STRH_POST
8213 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
8220 unsigned LdSize,
unsigned Data,
unsigned AddrIn,
8221 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
8222 unsigned LdOpc =
getLdOpcode(LdSize, IsThumb1, IsThumb2);
8223 assert(LdOpc != 0 &&
"Should have a load opcode");
8228 }
else if (IsThumb1) {
8231 .addReg(AddrIn).
addImm(0));
8233 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut);
8235 MIB.addReg(AddrIn).addImm(LdSize);
8237 }
else if (IsThumb2) {
8252 unsigned StSize,
unsigned Data,
unsigned AddrIn,
8253 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
8254 unsigned StOpc =
getStOpcode(StSize, IsThumb1, IsThumb2);
8255 assert(StOpc != 0 &&
"Should have a store opcode");
8259 }
else if (IsThumb1) {
8264 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut);
8266 MIB.addReg(AddrIn).addImm(StSize);
8268 }
else if (IsThumb2) {
8296 unsigned UnitSize = 0;
8301 bool IsThumb2 = Subtarget->
isThumb2();
8302 bool IsThumb = Subtarget->
isThumb();
8306 }
else if (Align & 2) {
8312 if ((Align % 16 == 0) && SizeVal >= 16)
8314 else if ((Align % 8 == 0) && SizeVal >= 8)
8323 bool IsNeon = UnitSize >= 8;
8324 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
8326 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
8327 : UnitSize == 8 ? &ARM::DPRRegClass
8330 unsigned BytesLeft = SizeVal % UnitSize;
8331 unsigned LoopSize = SizeVal - BytesLeft;
8333 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
8337 unsigned srcIn = src;
8338 unsigned destIn = dest;
8339 for (
unsigned i = 0; i < LoopSize; i+=UnitSize) {
8343 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
8344 IsThumb1, IsThumb2);
8345 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
8346 IsThumb1, IsThumb2);
8354 for (
unsigned i = 0; i < BytesLeft; i++) {
8358 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
8359 IsThumb1, IsThumb2);
8360 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
8361 IsThumb1, IsThumb2);
8401 if (Subtarget->
useMovt(*MF)) {
8402 unsigned Vtmp = varEnd;
8403 if ((LoopSize & 0xFFFF0000) != 0)
8406 TII->
get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16),
8407 Vtmp).addImm(LoopSize & 0xFFFF));
8409 if ((LoopSize & 0xFFFF0000) != 0)
8411 TII->
get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16),
8414 .
addImm(LoopSize >> 16));
8449 .addReg(varLoop).
addMBB(loopMBB)
8452 .addReg(srcLoop).
addMBB(loopMBB)
8455 .addReg(destLoop).
addMBB(loopMBB)
8462 IsThumb1, IsThumb2);
8464 IsThumb1, IsThumb2);
8469 BuildMI(*BB, BB->
end(), dl, TII->
get(ARM::tSUBi8), varLoop);
8471 MIB.addReg(varPhi).addImm(UnitSize);
8476 TII->
get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
8482 TII->
get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
8491 auto StartOfExit = exitMBB->
begin();
8495 unsigned srcIn = srcLoop;
8496 unsigned destIn = destLoop;
8497 for (
unsigned i = 0; i < BytesLeft; i++) {
8501 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
8502 IsThumb1, IsThumb2);
8503 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
8504 IsThumb1, IsThumb2);
8514 ARMTargetLowering::EmitLowered__chkstk(
MachineInstr &MI,
8521 "__chkstk is only supported on Windows");
8522 assert(Subtarget->
isThumb2() &&
"Windows on ARM requires Thumb-2 mode");
8548 .addImm((
unsigned)ARMCC::AL).addReg(0)
8549 .addExternalSymbol("__chkstk")
8554 case CodeModel::
Large:
8560 .addExternalSymbol(
"__chkstk");
8562 .addImm((
unsigned)ARMCC::AL).addReg(0)
8563 .addReg(Reg, RegState::
Kill)
8573 .addReg(
ARM::SP, RegState::
Kill)
8574 .addReg(
ARM::R4, RegState::
Kill)
8577 MI.eraseFromParent();
8589 MF->
insert(++MBB->getIterator(), ContBB);
8593 MBB->addSuccessor(ContBB);
8596 BuildMI(TrapBB, DL, TII->
get(ARM::t__brkdiv0));
8598 MBB->addSuccessor(TrapBB);
8601 .addReg(MI.getOperand(0).getReg())
8608 MI.eraseFromParent();
8617 bool isThumb2 = Subtarget->
isThumb2();
8625 case ARM::tLDR_postidx: {
8626 BuildMI(*BB, MI, dl, TII->
get(ARM::tLDMIA_UPD))
8639 case ARM::t2STR_preidx:
8642 case ARM::t2STRB_preidx:
8645 case ARM::t2STRH_preidx:
8649 case ARM::STRi_preidx:
8650 case ARM::STRBi_preidx: {
8651 unsigned NewOpc = MI.
getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
8652 : ARM::STRB_PRE_IMM;
8668 .addMemOperand(MMO);
8672 case ARM::STRr_preidx:
8673 case ARM::STRBr_preidx:
8674 case ARM::STRH_preidx: {
8678 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG;
break;
8679 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG;
break;
8680 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE;
break;
8689 case ARM::tMOVCCr_pseudo: {
8746 case ARM::BCCZi64: {
8752 bool RHSisZero = MI.
getOpcode() == ARM::BCCZi64;
8758 TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8759 .addReg(LHS1).
addImm(0));
8760 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8767 TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8768 .addReg(LHS1).
addReg(RHS1));
8769 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8770 .addReg(LHS2).
addReg(RHS2)
8779 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
8790 case ARM::Int_eh_sjlj_setjmp:
8791 case ARM::Int_eh_sjlj_setjmp_nofp:
8792 case ARM::tInt_eh_sjlj_setjmp:
8793 case ARM::t2Int_eh_sjlj_setjmp:
8794 case ARM::t2Int_eh_sjlj_setjmp_nofp:
8797 case ARM::Int_eh_sjlj_setup_dispatch:
8798 EmitSjLjDispatchBlock(MI, BB);
8826 bool isThumb2 = Subtarget->
isThumb2();
8830 unsigned NewRsbDstReg =
8846 TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8847 .addReg(ABSSrcReg).
addImm(0));
8851 TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
8858 TII->
get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
8865 TII->
get(ARM::PHI), ABSDstReg)
8866 .addReg(NewRsbDstReg).
addMBB(RSBBB)
8875 case ARM::COPY_STRUCT_BYVAL_I32:
8877 return EmitStructByval(MI, BB);
8879 return EmitLowered__chkstk(MI, BB);
8881 return EmitLowered__dbzchk(MI, BB);
8908 : &ARM::GPRRegClass);
8932 MCID = &TII->get(NewOpc);
8935 "converted opcode should be the same except for cc_out");
8947 assert(!NewOpc &&
"Optional cc_out operand required");
8952 bool definesCPSR =
false;
8953 bool deadCPSR =
false;
8966 assert(!NewOpc &&
"Optional cc_out operand required");
8972 "expect uninitialized optional cc_out operand");
9009 default:
return false;
9080 bool AllOnes =
false) {
9087 NonConstantVal, DAG))
9093 OtherOp, NonConstantVal);
9099 CCOp, TrueVal, FalseVal);
9191 Opcode = Intrinsic::arm_neon_vpaddls;
9193 Opcode = Intrinsic::arm_neon_vpaddlu;
9236 unsigned nextIndex = 0;
9259 || C1->getZExtValue() != nextIndex+1)
9301 return DAG.
getNode(ExtOp, dl, VT, tmp);
9334 if (AddcOp0.getNode() == AddcOp1.
getNode())
9339 "Expect ADDC with two result values. First: i32");
9363 "ADDE node has the wrong inputs");
9370 if (AddeOp0.getNode() == AddeOp1.
getNode())
9374 bool IsLeftOperandMUL =
false;
9379 IsLeftOperandMUL =
true;
9396 if (IsLeftOperandMUL)
9405 if (AddcOp0 == MULOp.
getValue(0)) {
9409 if (AddcOp1 == MULOp.
getValue(0)) {
9428 DAG.
getVTList(MVT::i32, MVT::i32), Ops);
9455 SDNode *PrevAddc =
nullptr;
9462 if (PrevAddc ==
nullptr)
9468 if (MLAL !=
SDValue(PrevAddc, 0))
9472 SDNode *UmlalNode =
nullptr;
9488 if (!
Zero ||
Zero->getZExtValue() != 0)
9509 DAG.
getVTList(MVT::i32, MVT::i32), Ops);
9633 return DAG.
getNode(Opcode, DL, VT,
9660 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
9662 ShiftAmt = ShiftAmt & (32 - 1);
9667 MulAmt >>= ShiftAmt;
9689 uint64_t MulAmtAbs = -MulAmt;
9735 APInt SplatBits, SplatUndef;
9736 unsigned SplatBitSize;
9739 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
9740 if (SplatBitSize <= 64) {
9777 APInt SplatBits, SplatUndef;
9778 unsigned SplatBitSize;
9780 if (BVN && Subtarget->
hasNEON() &&
9781 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
9782 if (SplatBitSize <= 64) {
9815 unsigned SplatBitSize;
9818 APInt SplatBits0, SplatBits1;
9822 if (BVN0 && BVN0->
isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
9823 HasAnyUndefs) && !HasAnyUndefs) {
9824 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
9825 HasAnyUndefs) && !HasAnyUndefs) {
9830 SplatBits0 == ~SplatBits1) {
9882 if ((Val & ~Mask) != Val)
9910 (Mask == 0xffff || Mask == 0xffff0000))
9926 (Mask2 == 0xffff || Mask2 == 0xffff0000))
9946 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
9986 ToMask = ~cast<ConstantSDNode>(N->
getOperand(2))->getAPIntValue();
9991 if (From->getOpcode() ==
ISD::SRL &&
9992 isa<ConstantSDNode>(From->getOperand(1))) {
9993 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
9996 From = From->getOperand(0);
10008 return LastActiveBitInA - 1 == FirstActiveBitInB;
10014 APInt ToMask, FromMask;
10022 APInt CombinedToMask = ToMask;
10024 APInt NewToMask, NewFromMask;
10026 if (NewFrom != From) {
10028 CombinedToMask |= NewToMask;
10034 if ((NewToMask & CombinedToMask).getBoolValue())
10047 CombinedToMask |= NewToMask;
10064 unsigned InvMask = cast<ConstantSDNode>(N->
getOperand(2))->getZExtValue();
10068 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
10069 "undefined behavior");
10070 unsigned Mask = (1u << Width) - 1;
10072 if ((Mask & (~Mask2)) == 0)
10085 APInt ToMask1, FromMask1;
10088 APInt ToMask2, FromMask2;
10096 APInt NewFromMask = FromMask1 | FromMask2;
10097 APInt NewToMask = ToMask1 | ToMask2;
10102 if (NewFromMask[0] == 0)
10139 DAG.getConstant(4, DL, MVT::i32));
10140 SDValue NewLD2 = DAG.getLoad(
10178 for (
unsigned i = 0; i < NumElts; ++
i) {
10208 for (
unsigned i = 0; i < NumElts; ++
i) {
10255 unsigned NumOfBitCastedElts = 0;
10257 unsigned NumOfRelevantElts = NumElts;
10258 for (
unsigned Idx = 0; Idx < NumElts; ++Idx) {
10263 ++NumOfBitCastedElts;
10264 }
else if (Elt.
isUndef() || isa<ConstantSDNode>(Elt))
10267 --NumOfRelevantElts;
10271 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
10279 if (!TLI.isTypeLegal(VecVT))
10289 for (
unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
10374 unsigned HalfElts = NumElts/2;
10376 for (
unsigned n = 0; n < NumElts; ++n) {
10379 if (MaskElt < (
int)HalfElts)
10381 else if (MaskElt >= (
int)NumElts && MaskElt < (
int)(NumElts + HalfElts))
10382 NewElt = HalfElts + MaskElt - NumElts;
10400 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
10410 UI.getUse().getResNo() != Addr.
getResNo())
10419 bool isLoadOp =
true;
10420 bool isLaneOp =
false;
10421 unsigned NewOpc = 0;
10422 unsigned NumVecs = 0;
10424 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
10428 NumVecs = 1;
break;
10430 NumVecs = 2;
break;
10432 NumVecs = 3;
break;
10434 NumVecs = 4;
break;
10436 NumVecs = 2; isLaneOp =
true;
break;
10438 NumVecs = 3; isLaneOp =
true;
break;
10440 NumVecs = 4; isLaneOp =
true;
break;
10442 NumVecs = 1; isLoadOp =
false;
break;
10444 NumVecs = 2; isLoadOp =
false;
break;
10446 NumVecs = 3; isLoadOp =
false;
break;
10448 NumVecs = 4; isLoadOp =
false;
break;
10450 NumVecs = 2; isLoadOp =
false; isLaneOp =
true;
break;
10452 NumVecs = 3; isLoadOp =
false; isLaneOp =
true;
break;
10454 NumVecs = 4; isLoadOp =
false; isLaneOp =
true;
break;
10465 NumVecs = 1; isLaneOp =
false;
break;
10467 NumVecs = 1; isLaneOp =
false; isLoadOp =
false;
break;
10475 }
else if (isIntrinsic) {
10478 assert(isStore &&
"Node has to be a load, a store, or an intrinsic!");
10489 uint64_t IncVal = CInc->getZExtValue();
10490 if (IncVal != NumBytes)
10492 }
else if (NumBytes >= 3 * 16) {
10501 EVT AlignedVecTy = VecTy;
10518 if (isa<LSBaseSDNode>(N)) {
10519 if (Alignment == 0)
10523 assert(NumVecs == 1 &&
"Unexpected multi-element generic load/store.");
10524 assert(!isLaneOp &&
"Unexpected generic load/store lane.");
10541 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
10543 for (n = 0; n < NumResultVecs; ++n)
10544 Tys[n] = AlignedVecTy;
10555 if (
StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
10581 for (
unsigned i = 0; i < NumResultVecs; ++
i)
10587 SDValue &LdVal = NewResults[0];
10623 unsigned NumVecs = 0;
10624 unsigned NewOpc = 0;
10625 unsigned IntNo = cast<ConstantSDNode>(VLD->
getOperand(1))->getZExtValue();
10626 if (IntNo == Intrinsic::arm_neon_vld2lane) {
10629 }
else if (IntNo == Intrinsic::arm_neon_vld3lane) {
10632 }
else if (IntNo == Intrinsic::arm_neon_vld4lane) {
10641 unsigned VLDLaneNo =
10642 cast<ConstantSDNode>(VLD->
getOperand(NumVecs+3))->getZExtValue();
10646 if (UI.getUse().getResNo() == NumVecs)
10650 VLDLaneNo != cast<ConstantSDNode>(User->
getOperand(1))->getZExtValue())
10657 for (n = 0; n < NumVecs; ++n)
10670 unsigned ResNo = UI.getUse().
getResNo();
10672 if (ResNo == NumVecs)
10680 std::vector<SDValue> VLDDupResults;
10681 for (
unsigned n = 0; n < NumVecs; ++n)
10682 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), n));
10683 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), NumVecs));
10710 unsigned Imm = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
10776 assert(StVT != VT &&
"Cannot truncate to the same type");
10785 if (0 != (NumElems * FromEltSz) % ToEltSz)
return SDValue();
10787 unsigned SizeRatio = FromEltSz / ToEltSz;
10792 NumElems*SizeRatio);
10798 for (
unsigned i = 0; i < NumElems; ++
i)
10800 ? (i + 1) * SizeRatio - 1
10815 if (TLI.
isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
10834 for (
unsigned I = 0; I <
E; I++) {
10836 StoreType, ShuffWide,
10925 if (!isa<BuildVectorSDNode>(ConstVec))
10933 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
10944 if (C == -1 || C == 0 || C > 32)
10949 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
10950 Intrinsic::arm_neon_vcvtfp2fxu;
10956 if (IntBits < FloatBits)
10983 if (!isa<BuildVectorSDNode>(ConstVec))
10991 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11002 if (C == -1 || C == 0 || C > 32)
11008 if (IntBits < FloatBits)
11013 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
11014 Intrinsic::arm_neon_vcvtfxu2fp;
11029 APInt SplatBits, SplatUndef;
11030 unsigned SplatBitSize;
11032 if (! BVN || ! BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11033 HasAnyUndefs, ElementBits) ||
11034 SplatBitSize > ElementBits)
11036 Cnt = SplatBits.getSExtValue();
11045 assert(VT.
isVector() &&
"vector shift count is not a vector type");
11049 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
11060 assert(VT.
isVector() &&
"vector shift count is not a vector type");
11065 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
11066 if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
11075 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
11086 case Intrinsic::arm_neon_vshifts:
11087 case Intrinsic::arm_neon_vshiftu:
11088 case Intrinsic::arm_neon_vrshifts:
11089 case Intrinsic::arm_neon_vrshiftu:
11090 case Intrinsic::arm_neon_vrshiftn:
11091 case Intrinsic::arm_neon_vqshifts:
11092 case Intrinsic::arm_neon_vqshiftu:
11093 case Intrinsic::arm_neon_vqshiftsu:
11094 case Intrinsic::arm_neon_vqshiftns:
11095 case Intrinsic::arm_neon_vqshiftnu:
11096 case Intrinsic::arm_neon_vqshiftnsu:
11097 case Intrinsic::arm_neon_vqrshiftns:
11098 case Intrinsic::arm_neon_vqrshiftnu:
11099 case Intrinsic::arm_neon_vqrshiftnsu: {
11102 unsigned VShiftOpc = 0;
11105 case Intrinsic::arm_neon_vshifts:
11106 case Intrinsic::arm_neon_vshiftu:
11112 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
11118 case Intrinsic::arm_neon_vrshifts:
11119 case Intrinsic::arm_neon_vrshiftu:
11124 case Intrinsic::arm_neon_vqshifts:
11125 case Intrinsic::arm_neon_vqshiftu:
11130 case Intrinsic::arm_neon_vqshiftsu:
11135 case Intrinsic::arm_neon_vrshiftn:
11136 case Intrinsic::arm_neon_vqshiftns:
11137 case Intrinsic::arm_neon_vqshiftnu:
11138 case Intrinsic::arm_neon_vqshiftnsu:
11139 case Intrinsic::arm_neon_vqrshiftns:
11140 case Intrinsic::arm_neon_vqrshiftnu:
11141 case Intrinsic::arm_neon_vqrshiftnsu:
11153 case Intrinsic::arm_neon_vshifts:
11154 case Intrinsic::arm_neon_vshiftu:
11157 case Intrinsic::arm_neon_vrshifts:
11159 case Intrinsic::arm_neon_vrshiftu:
11161 case Intrinsic::arm_neon_vrshiftn:
11163 case Intrinsic::arm_neon_vqshifts:
11165 case Intrinsic::arm_neon_vqshiftu:
11167 case Intrinsic::arm_neon_vqshiftsu:
11169 case Intrinsic::arm_neon_vqshiftns:
11171 case Intrinsic::arm_neon_vqshiftnu:
11173 case Intrinsic::arm_neon_vqshiftnsu:
11175 case Intrinsic::arm_neon_vqrshiftns:
11177 case Intrinsic::arm_neon_vqrshiftnu:
11179 case Intrinsic::arm_neon_vqrshiftnsu:
11188 case Intrinsic::arm_neon_vshiftins: {
11191 unsigned VShiftOpc = 0;
11207 case Intrinsic::arm_neon_vqrshifts:
11208 case Intrinsic::arm_neon_vqrshiftu:
11286 if (VT == MVT::i32 &&
11289 isa<ConstantSDNode>(Lane)) {
11319 const APInt &Mask = CI->getAPIntValue();
11353 auto CCNode = cast<ConstantSDNode>(CMOV->
getOperand(2));
11354 auto CC = CCNode->getAPIntValue().getLimitedValue();
11391 unsigned Heuristic = Subtarget->
isThumb() ? 3 : 2;
11397 APInt KnownZero, KnownOne;
11399 if ((OrCI & KnownZero) != OrCI)
11414 for (
unsigned BitInY = 0, NumActiveBits = OrCI.
getActiveBits();
11415 BitInY < NumActiveBits; ++BitInY) {
11416 if (OrCI[BitInY] == 0)
11419 Mask.setBit(BitInY);
11455 if ((LHS00C && LHS00C->getZExtValue() == 0) &&
11456 (LHS01C && LHS01C->getZExtValue() == 1) &&
11457 (LHS1C && LHS1C->getZExtValue() == 1) &&
11458 (RHSC && RHSC->getZExtValue() == 0)) {
11511 if (CC ==
ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
11514 }
else if (CC ==
ARMCC::EQ && TrueVal == RHS) {
11527 if ((LHS0C && LHS0C->getZExtValue() == 0) &&
11528 (LHS1C && LHS1C->getZExtValue() == 1) &&
11529 (RHSC && RHSC->getZExtValue() == 0)) {
11537 APInt KnownZero, KnownOne;
11540 if (KnownZero == 0xfffffffe)
11543 else if (KnownZero == 0xffffff00)
11546 else if (KnownZero == 0xffff0000)
  switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane:
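// NOTE (added commentary): every vldN/vstN intrinsic listed above is a
// candidate for base-update folding (CombineBaseUpdate): when the address
// register is incremented by the access size right after the operation, the
// post-incrementing "vld2.32 {d16, d17}, [r0]!" style form replaces a
// separate ADD of the pointer.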
                                                     bool *Fast) const {
  // ...
  if (AllowsUnaligned) {
    // ... (elided: unaligned i16/i32 accesses are fine when the subtarget
    // permits them)
  }
  // ...
  if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                       unsigned AlignCheck) {
  return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
          (DstAlign == 0 || DstAlign % AlignCheck == 0));
}
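// NOTE (added commentary): an alignment of 0 means "unknown" and is accepted;
// otherwise both the source and destination alignments must be multiples of
// AlignCheck before the wide memcpy/memset types below may be used.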
EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                                           unsigned SrcAlign, bool IsMemset,
                                           bool ZeroMemset, bool MemcpyStrSrc,
                                           MachineFunction &MF) const {
  // ...
  if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
      /* ... */) {
    // ... (elided: Size >= 16 with suitable alignment yields MVT::v2f64)
  } else if (Size >= 8 &&
             /* ... */) {
    // ... (elided: MVT::f64)
  }
  // ...
  else if (Size >= 2)
    // ... (elided: MVT::i16)
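// NOTE (added commentary): memcpy/memset expansion picks the widest workable
// chunk first: 128-bit NEON (v2f64) when alignment or fast unaligned access
// allows it, then 64-bit f64, then i32/i16 for short or poorly aligned tails.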
                                                unsigned AS) const {
  // ... (elided: only legal addressing modes are costed; a negative scale
  // costs one extra instruction, anything else is free)
  return AM.Scale < 0 ? 1 : 0;
  unsigned Scale = 1;
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  // ... (elided: i1/i8 keep Scale 1, i16 uses 2, i32 uses 4)
  }

  if ((V & (Scale - 1)) != 0)
    return false;
  V /= Scale;
  return V == (V & ((1LL << 5) - 1));
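// NOTE (added commentary): Thumb1 loads and stores encode a 5-bit offset
// scaled by the access size, so an i32 access admits byte offsets
// 0, 4, ..., 124.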
  bool isNeg = false;
  if (V < 0) {
    isNeg = true;
    V = -V;
  }
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  // ... (i1/i8/i16/i32: +imm12, or -imm8 when negated)
    if (isNeg)
      return V == (V & ((1LL << 8) - 1));
    return V == (V & ((1LL << 12) - 1));
  // ... (f32/f64: an 8-bit immediate scaled by 4, VFP only)
    return V == (V & ((1LL << 8) - 1));
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  // ... (i1/i8/i32: +/- imm12)
    return V == (V & ((1LL << 12) - 1));
  // ... (i16: +/- imm8)
    return V == (V & ((1LL << 8) - 1));
  // ... (f32/f64: an 8-bit immediate scaled by 4, VFP only)
    return V == (V & ((1LL << 8) - 1));
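// NOTE (added commentary): ARM-mode address offsets are 12 bits for word and
// byte accesses, 8 bits for halfwords, and 8 bits scaled by four for VFP
// loads and stores; the direction is carried separately by the add/sub bit.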
  int Scale = AM.Scale;
  // ...
  switch (VT.getSimpleVT().SimpleTy) {
  default:
    return false;
  // ... (i1/i8/i16/i32: r + r << imm)
    Scale = Scale & ~1;
    return Scale == 2 || Scale == 4 || Scale == 8;
  // ... (isVoid: allow r << imm when the immediate is even)
    if (Scale & 1)
      return false;
bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS) const {
  // ...
  switch (AM.Scale) {
  // ... (elided: scale 0 is always legal; scale 1 is rejected on Thumb1;
  // Thumb2 defers to isLegalT2ScaledAddressingMode)
  default:
    // ...
    int Scale = AM.Scale;
    switch (VT.getSimpleVT().SimpleTy) {
    default:
      return false;
    // ... (i1/i8/i32: r +/- r, or r + r << imm)
      if (Scale < 0)
        Scale = -Scale;
      // ...
    // ... (isVoid: allow r << imm when the immediate is even)
      if (Scale & 1)
        return false;
  // Thumb1 has no CMN and only 8-bit CMP immediates.
  return Imm >= 0 && Imm <= 255;
  // ...
  // Thumb1 ADD/SUB immediates, checked via the absolute value since add and
  // sub share an encoding.
  return AbsImm >= 0 && AbsImm <= 255;
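// NOTE (added commentary): both ranges mirror the 8-bit immediate fields of
// the Thumb1 CMP and ADD/SUB encodings; ARM and Thumb2 accept their wider
// modified-immediate forms via getSOImmVal/getT2SOImmVal instead.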
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad,
                                      SDValue &Base, SDValue &Offset,
                                      bool &isInc, SelectionDAG &DAG) {
  // ...
    int RHSC = (int)RHS->getZExtValue();
    // AddressingMode 3: 8-bit offset.
    if (RHSC < 0 && RHSC > -256) {
      // ...
    }
  // ...
    int RHSC = (int)RHS->getZExtValue();
    // AddressingMode 2: 12-bit offset.
    if (RHSC < 0 && RHSC > -0x1000) {
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad,
                                     SDValue &Base, SDValue &Offset,
                                     bool &isInc, SelectionDAG &DAG) {
  // ...
    int RHSC = (int)RHS->getZExtValue();
    if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
      isInc = false;
      // ...
    } else if (RHSC > 0 && RHSC < 0x100) { // 8 bits, no zero.
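// NOTE (added commentary): Thumb2 pre/post-indexed accesses take a signed
// 8-bit offset, so both directions cap at 255; isInc records whether the
// adding or the subtracting form of the instruction is required.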
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    // ...
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
  }
  // ...
  bool isLegal = false;
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    Ptr = LD->getBasePtr();
    isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    Ptr = ST->getBasePtr();
    isNonExt = !ST->isTruncatingStore();
  } else
    return false;

  if (Subtarget->isThumb1Only()) {
    // Thumb-1 can do a limited post-increment load or store as an updating
    // LDM: it must be a non-extending i32 access at reg+4.
    // ...
    if (!RHS || RHS->getZExtValue() != 4)
      return false;
    // ...
  }
  // ...
  bool isLegal = false;
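// NOTE (added commentary): Thumb1 has no general post-indexed addressing;
// the only post-increment folded here is "access i32, then add 4", which can
// select as a single updating LDM/STM with one register.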
                                                      unsigned Depth) const {
  unsigned BitWidth = KnownZero.getBitWidth();
  KnownZero = KnownOne = APInt(BitWidth, 0);
  // ...
  // ARMISD::CMOV: a bit is known only if it agrees in both selected values.
  if (KnownZero == 0 && KnownOne == 0)
    return;

  APInt KnownZeroRHS, KnownOneRHS;
  // ...
  KnownZero &= KnownZeroRHS;
  KnownOne &= KnownOneRHS;
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
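    // NOTE (added commentary): ldrexb/ldrexh zero-extend their result into a
    // 32-bit register, so every bit above the in-memory width of the access
    // is known to be zero.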
  switch (AsmPieces.size()) {
  default:
    return false;
  case 1:
    AsmStr = AsmPieces[0];
    AsmPieces.clear();
    SplitString(AsmStr, AsmPieces, " \t,");

    // Matches the single-statement idiom "rev $0, $1" and rewrites it to
    // llvm.bswap.i32, which still selects to REV but is also visible to
    // constant folding and other IR optimizations.
    if (AsmPieces.size() == 3 &&
        AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    // ... (elided: single-letter constraint classification)
    }
  } else if (Constraint.size() == 2) {
    switch (Constraint[0]) {
    // ...
  // If we don't have a value, we can't do a match, but allow it at the
  // lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  // ...
  switch (*constraint) {
  typedef std::pair<unsigned, const TargetRegisterClass *> RCPair;
  if (Constraint.size() == 1) {
    // GCC ARM constraint letters.
    switch (Constraint[0]) {
    case 'l': // Low registers on Thumb, all registers otherwise.
      if (Subtarget->isThumb())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'h': // High registers, Thumb only.
      // ...
        return RCPair(0U, &ARM::hGPRRegClass);
      break;
    case 'r':
      if (Subtarget->isThumb1Only())
        return RCPair(0U, &ARM::tGPRRegClass);
      return RCPair(0U, &ARM::GPRRegClass);
    case 'w': // A floating-point/vector register, chosen by operand width.
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPRRegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPRRegClass);
      break;
    case 'x': // The restricted low halves of the FP register file.
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPR_8RegClass);
      if (VT.getSizeInBits() == 64)
        return RCPair(0U, &ARM::DPR_8RegClass);
      if (VT.getSizeInBits() == 128)
        return RCPair(0U, &ARM::QPR_8RegClass);
      break;
    case 't':
      if (VT == MVT::f32)
        return RCPair(0U, &ARM::SPRRegClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
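// NOTE (illustrative example, not from this file): these mappings serve
// GCC-style register constraints, e.g.
//   asm("vadd.f32 %0, %1, %2" : "=w"(r) : "w"(a), "w"(b));
// picks an S/D/Q register by operand width, while "l" restricts Thumb code
// to r0-r7 and "{cc}" names the condition flags explicitly.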
void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  // ...
  // Currently only length-1 constraints are supported.
  if (Constraint.length() != 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I': case 'J': case 'K': case 'L':
  case 'M': case 'N': case 'O':
    // ...
    int CVal = (int) CVal64;
    // None of these constraints allow values larger than 32 bits; check
    // that the constant fits in an int.
    if (CVal != CVal64)
      return;

    switch (ConstraintLetter) {
    // ... (elided: one Thumb1/Thumb2/ARM-specific range check per letter)
      if (CVal >= 0 && CVal <= 65535)          // movw-style immediate
        break;
      // ...
      if (CVal >= 0 && CVal <= 255)            // Thumb1 ADD immediate
        break;
    } else if (Subtarget->isThumb2()) {
      // ...
      if (CVal >= -255 && CVal <= -1)          // negated Thumb1 ADD immediate
        break;
      // ...
      if (CVal >= -4095 && CVal <= 4095)
        break;
    } else if (Subtarget->isThumb2()) {
      // ...
      if (CVal >= -7 && CVal < 7)              // Thumb1 three-operand ADD/SUB
        break;
    } else if (Subtarget->isThumb2()) {
      // ...
      if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))  // ADD sp, #imm
        break;
      // ...
      if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
        break;
      // ...
      if (CVal >= 0 && CVal <= 31)             // shift amounts
        break;
      // ...
      if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0)) // SP adjustment
        break;
    }
    // ...
    Ops.push_back(Result);
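// NOTE (illustrative example, not from this file): these checks gate inline
// asm immediates, e.g.
//   asm("add %0, %1, %2" : "=r"(r) : "r"(a), "I"(200));
// is accepted only while the constant fits the letter's range for the
// current subtarget; otherwise no operand is pushed and the constraint is
// rejected.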
    llvm_unreachable("Unhandled Opcode in getDivRemLibcall");
  // ...
    llvm_unreachable("Unhandled Opcode in getDivRemArgList");
  // ...
    Entry.isSExt = isSigned;
    Entry.isZExt = !isSigned;
    Args.push_back(Entry);
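// NOTE (added commentary): each divrem operand becomes one libcall argument,
// tagged signed or unsigned so the calling convention matches the chosen
// __aeabi_*divmod variant.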
  assert(/* ... */ && "Register-based DivRem lowering only");
  // ...
  assert(/* ... */ && "Invalid opcode for Div/Rem lowering");
  // ... (elided: targets with hardware divide compute both results inline)
  SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
  // ...
  SDValue Values[2] = {Div, Rem};
  // ... (elided: otherwise build the divmod libcall)
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return CallInfo.first;
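// NOTE (added commentary): on AEABI targets the libcall path uses
// __aeabi_idivmod / __aeabi_uidivmod, which return the quotient in r0 and the
// remainder in r1, so a single call satisfies both results of [SU]DIVREM.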
  // Build the {div, rem} aggregate return type for the divmod libcall.
  std::vector<Type *> RetTyParams;
  Type *RetTyElement;
  // ...
  RetTyParams.push_back(RetTyElement);
  RetTyParams.push_back(RetTyElement);
  // ...
  CallLoweringInfo CLI(DAG);
  // ...
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  SDNode *ResNode = CallResult.first.getNode();
  // ...
  SDValue Ops[2] = { NewSP, Chain };
  assert(/* ... */ && "Unexpected type for custom-lowering FP_EXTEND");
  // ...
  assert(/* ... */ && "Unexpected type for custom-lowering FP_ROUND");
  // ...
  // isBitFieldInvertedMask: an all-ones value cannot be a BFC/BFI mask.
  if (v == 0xffffffff)
    return false;
  // ...
  return ARM_AM::getFP64Imm(Imm) != -1;
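// NOTE (added commentary): getFP32Imm/getFP64Imm return the 8-bit VMOV
// encoding of the constant or -1, so this legality check accepts exactly the
// FP immediates that a single VMOV.F32/VMOV.F64 can materialize.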
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane: {
    // ...
    // Conservatively set memVT to the entire set of vectors loaded.
    uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
    // ...
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
    // ...
    return true;
  }
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    // ...
    // Conservatively set memVT to the entire set of vectors stored.
    unsigned NumElts = 0;
    // ... (elided: sum over the vector arguments)
      NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
    // ...
    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
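    // NOTE (added commentary): describing vldN/vstN through
    // getTgtMemIntrinsic gives the optimizer an accurate footprint: memVT
    // spans all NumElts 64-bit lanes touched, and the alignment comes from
    // the intrinsic's explicit align argument rather than the pointer type.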
  case Intrinsic::arm_ldaex:
  case Intrinsic::arm_ldrex: {
    // ...
  }
  case Intrinsic::arm_stlex:
  case Intrinsic::arm_strex: {
    // ...
  }
  case Intrinsic::arm_stlexd:
  case Intrinsic::arm_strexd: {
    // ...
  }
  case Intrinsic::arm_ldaexd:
  case Intrinsic::arm_ldrexd: {
  // Constants that fit in 32 bits are cheaper as immediates than as loads.
  if (Bits == 0 || Bits > 32)
    return false;
  return true;
}

bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT,
                                                unsigned Index) const {
  // ...
}

Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord,
                                                 bool IsStore, bool IsLoad) const {
  // ...
}

Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord,
                                                  bool IsStore, bool IsLoad) const {
  // ...
}

// shouldExpandAtomicStoreInIR: 64-bit atomic stores become ldrexd/strexd
// loops, except on M-class cores, which lack the doubleword exclusives.
  return (Size == 64) && !Subtarget->isMClass();

// shouldExpandAtomicRMWInIR:
  return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
  bool hasAtomicCmpXchg = /* ... */;
  // ...
  return InsertFencesForAtomic;
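// NOTE (added commentary): the atomic-expansion choices above prefer inline
// ldrex/strex (LL/SC) loops, with the doubleword ldrexd/strexd forms for
// 64-bit values on A/R-class cores; InsertFencesForAtomic is set for
// subtargets that lack acquire/release instructions and therefore need
// explicit DMB barriers around atomic operations.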
bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // ...
  // A lane index unknown at compile time is too expensive to lower and
  // cannot be combined with the store.
  if (!isa<ConstantInt>(Idx))
    return false;
  // ...
  unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
  // Anything that fits in a D or Q register can use store + lane extract.
  if (BitWidth == 64 || BitWidth == 128) {
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  // ... (64-bit loads use the paired exclusive)
  Intrinsic::ID Int =
      IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
  // ...
  Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
  // ... (narrower loads)
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  // ...
      cast<PointerType>(Addr->getType())->getElementType());
  // ... (emitStoreConditional: 64-bit values are split into two i32 halves)
  Intrinsic::ID Int =
      IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
  // ...
  Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
  // ...
  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
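// NOTE (illustrative IR sketch, not from this file): the atomic expansion
// pass stitches these helpers into a retry loop, roughly
//   loop:
//     %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
//     ; ...operate on the 64-bit value...
//     %fail = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %addr)
//     %retry = icmp ne i32 %fail, 0
//     br i1 %retry, label %loop, label %done
// strexd returns 0 on success and 1 if the exclusive monitor was lost.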
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");
  // ...
  if (!Subtarget->hasNEON() || (VecSize != 64 && VecSize != 128) || EltIs64Bits)
    return false;
  // ...
  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};
  // ...
  Type *Tys[] = { VecTy, Int8Ptr };
  // ...
  for (unsigned i = 0; i < Shuffles.size(); i++) {
    unsigned Index = Indices[i];
  // ...

static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                   unsigned NumElts) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumElts; i++)
    Mask.push_back(Builder.getInt32(Start + i));
  return ConstantVector::get(Mask);
}
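// NOTE (added commentary): getSequentialMask(Builder, 0, 4) yields the
// constant <0, 1, 2, 3>, used as a shufflevector mask to pull one contiguous
// run of lanes out of a wider vector.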
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  // ...
  assert(/* ... */ && "Invalid interleaved store");
  // ...
  if (!Subtarget->hasNEON() || (SubVecSize != 64 && SubVecSize != 128) ||
      /* ... */)
    return false;
  // ...
  static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                             Intrinsic::arm_neon_vst3,
                                             Intrinsic::arm_neon_vst4};
  // ...
  Type *Tys[] = { Int8Ptr, SubVecTy };
  // ...
  Function *VstNFunc = Intrinsic::getDeclaration(
      SI->getModule(), StoreInts[Factor - 2], Tys);
  // ...
  for (unsigned i = 0; i < Factor; i++) {
    if (Mask[i] >= 0) {
      // ...
    } else {
      // If the first lane of this chunk is undef, scan forward for a
      // defined lane to base the mask on.
      unsigned StartMask = 0;
      for (unsigned j = 1; j < LaneLen; j++) {
        if (Mask[j*Factor + i] >= 0) {
          StartMask = Mask[j*Factor + i] - j;
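// NOTE (illustrative sketch, abridged and not from this file): for
// Factor == 3 with <4 x i32> sub-vectors, an interleaving shuffle such as
//   shufflevector <8 x i32> %ab, <8 x i32> %c,
//                 <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, ...>
// feeding a store becomes one @llvm.arm.neon.vst3.* call, i.e. a single
// "vst3.32" style instruction instead of twelve scalar stores.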
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  }
  // ... (elided: float/double leaves set or check the base type)
  else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    // ...
      return VT->getBitWidth() == 64;
    // ...
      return VT->getBitWidth() == 128;
    switch (VT->getBitWidth()) {
    // ...
    }
  }

  return (Members > 0 && Members <= 4);
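// NOTE (added commentary): a homogeneous aggregate is at most four members
// sharing one floating-point or vector base type, e.g.
// "struct { float x, y, z; }"; under AAPCS-VFP such a value travels in
// consecutive VFP registers (here s0-s2) rather than the integer r0-r3 set.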
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;
  // ...
  uint64_t Members = 0;
  // ...
  return IsHA || IsIntArray;
}

unsigned ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
}

unsigned ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
}
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  // ...
  const TargetRegisterClass *RC = nullptr;
  if (ARM::GPRRegClass.contains(*I))
    RC = &ARM::GPRRegClass;
  else if (ARM::DPRRegClass.contains(*I))
    RC = &ARM::DPRRegClass;
  else
    llvm_unreachable("Unexpected register class in CSRsViaCopy!");
  // ...
  assert(Entry->getParent()->getFunction()->hasFnAttribute(
             Attribute::NoUnwind) &&
         "Function should be nounwind in insertCopiesSplitCSR!");
  // ... (elided: copy each CSR into a fresh virtual register at entry)
  for (auto *Exit : Exits)
    BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
            TII->get(TargetOpcode::COPY), *I)
        .addReg(NewVR);
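// NOTE (added commentary): split-CSR lowering serves calling conventions such
// as cxx_fast_tls: each callee-saved register is copied into a virtual
// register at function entry and copied back before every return, so the
// saves become ordinary COPYs the register allocator can place, rather than
// prologue/epilogue spills.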
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG)
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
static bool isSimpleType(Type *T)
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type (if unknown returns 0).
int getFunctionContextIndex() const
Return the index for the function context object.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
void setFrameAddressIsTaken(bool T)
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
int getPromotedConstpoolIncrease() const
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Value * getValueOperand()
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, SelectionDAG &DAG)
BC is a bitcast that is about to be turned into a VMOVDRR.
bool use_empty() const
Return true if there are no uses of this node.
const Value * getCalledValue() const
Get a pointer to the function that is invoked by this instruction.
static MVT getIntegerVT(unsigned BitWidth)
void computeKnownBits(const Value *V, APInt &KnownZero, APInt &KnownOne, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
void push_back(const T &Elt)
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
bool isEHPad() const
Returns true if the block is a landing pad.
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
A parsed version of the target data layout string in and methods for querying it. ...
static bool isVZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
void markGlobalAsPromotedToConstantPool(const GlobalVariable *GV)
Indicate to the backend that GV has had its storage changed to inside a constant pool.
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
bool hasCallSiteLandingPad(MCSymbol *Sym)
Return true if the landing pad Eh symbol has an associated call site.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
static MachinePointerInfo getJumpTable(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a jump table entry.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
Flags getFlags() const
Return the raw flags of the source value,.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
LLVMContext * getContext() const
LLVM Argument representation.
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
uint64_t getZExtValue() const
Get zero extended value.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain)
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG)
STATISTIC(NumFunctions,"Total number of functions")
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
static SDValue PerformVMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMULCombine Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the special multi...
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, SelectionDAG &DAG)
LocInfo getLocInfo() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
static bool IsVUZPShuffleNode(SDNode *N)
TOF
Target Operand Flag enum.
ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete, but still used on some target...
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
MachineBasicBlock * getMBB() const
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
A Module instance is used to store all the information related to an LLVM module. ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
void getInRegsParamInfo(unsigned InRegsParamRecordIndex, unsigned &BeginReg, unsigned &EndReg) const
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
void setIsEHPad(bool V=true)
Indicates the block is a landing pad.
const TargetMachine & getTargetMachine() const
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
static unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
bool isCalledByLegalizer() const
static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG)
lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the bit-count for each 32-bit eleme...
static SDValue PerformInsertEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformInsertEltCombine - Target-specific dag combine xforms for ISD::INSERT_VECTOR_ELT.
EABI EABIVersion
EABIVersion - This flag specifies the EABI version.
static bool isVTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG &DAG)
SkipLoadExtensionForVMULL - return a load of the original vector size that does not do any sign/zero ...
int getSplatIndex() const
Carry-setting nodes for multiple precision addition and subtraction.
const TargetMachine & getTarget() const
ARMConstantPoolValue - ARM specific constantpool value.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each element has been zero/sign-...
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
void setIsDef(bool Val=true)
Change a def to a use, or a use to a def.
Describe properties that are true of each instruction in the target description file.
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, uint64_t &Members)
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
BBTy * getParent() const
Get the basic block containing the call site.
Y = RRC X, rotate right via carry.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static bool hasNormalLoadOperand(SDNode *N)
hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node are normal, non-volatile loads.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
bool hasT2ExtractPack() const
This class represents a function call, abstracting a target machine's calling convention.
unsigned InferPtrAlignment(SDValue Ptr) const
Infer alignment of a load / store address.
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
EK_Inline - Jump table entries are emitted inline at their point of use.
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
const GlobalValue * getGlobal() const
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
const std::string & getAsmString() const
Global Offset Table, Thread Pointer Offset.
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
static MVT getFloatingPointVT(unsigned BitWidth)
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs...
static bool isLegalT1AddressImmediate(int64_t V, EVT VT)
static cl::opt< bool > ARMInterworking("arm-interworking", cl::Hidden, cl::desc("Enable / disable ARM interworking (for debugging only)"), cl::init(true))
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Function Alias Analysis Results
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
unsigned getSizeInBits() const
This instruction constructs a fixed permutation of two input vectors.
const unsigned char * bytes_end() const
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
unsigned getByValSize() const
bool isUnindexed() const
Return true if this is NOT a pre/post inc/dec load/store.
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
If this value is smaller than the specified limit, return it, otherwise return the limit value...
unsigned getInRegsParamsCount() const
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG)
SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending load, or BUILD_VECTOR with extended elements, return the unextended value.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, const ARMSubtarget *ST, const SDLoc &dl)
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
unsigned getNumOperands() const
Return the number of values used by this operation.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
const std::string & getConstraintString() const
const Function * getParent() const
Return the enclosing method, or null if none.
unsigned getNumOperands() const
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
static SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, TargetLowering::DAGCombinerInfo &DCI)
const SDValue & getOperand(unsigned Num) const
void setIsDead(bool Val=true)
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
setjmp/longjmp based exceptions
An instruction for reading from memory.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1...
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
static IntegerType * getInt64Ty(LLVMContext &C)
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Type * getElementType() const
Global Offset Table, PC Relative.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
static IntegerType * getInt16Ty(LLVMContext &C)
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
bool isThumb1Only() const
static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue CombineBaseUpdate(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, NEON load/store intrinsics...
unsigned getValNo() const
const SDValue & getBasePtr() const
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
static SDValue PerformSUBCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
static bool isThumb(const MCSubtargetInfo &STI)
unsigned createPICLabelUId()
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
unsigned getResNo() const
get the index which selects a specific result in the SDNode
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
bool isUndef() const
Return true if the type of the node type undefined.
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
static bool allUsersAreInFunction(const Value *V, const Function *F)
Return true if all users of V are within function F, looking through ConstantExprs.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
bool optForMinSize() const
Optimize this function for minimum size (-Oz).
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
SDValue getExternalSymbol(const char *Sym, EVT VT)
return AArch64::GPR64RegClass contains(Reg)
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool isOSWindows() const
Tests whether the OS is Windows.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist...
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG)
const Triple & getTargetTriple() const
const ARMBaseInstrInfo * getInstrInfo() const override
static MCDisassembler::DecodeStatus addOperand(MCInst &Inst, const MCOperand &Opnd)
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
bool isVector() const
isVector - Return true if this is a vector value type.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
The address of a basic block.
bool isTargetAEABI() const
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
bool useSoftFloat() const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
static const MachineInstrBuilder & AddDefaultPred(const MachineInstrBuilder &MIB)
A description of a memory reference used in the backend.
static SDValue PerformLOADCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
struct fuzzer::@269 Flags
ParmContext getCallOrPrologue() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
const HexagonInstrInfo * TII
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) intrinsic...
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG)
static bool isVUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
bool isTargetDarwin() const
static bool isUpperSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
std::size_t countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
static Constant * get(LLVMContext &Context, ArrayRef< uint8_t > Elts)
get() constructors - Return a constant with array type with an element count and element type matchin...
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef...
Type * getArrayElementType() const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
A Use represents the edge between a Value definition and its users.
static SDValue findMUL_LOHI(SDValue V)
static bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize...
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
MachineFunction & getMachineFunction() const
static bool isVZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of "vector_shuffle v...
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
unsigned getFrameRegister(const MachineFunction &MF) const override
static void advance(T &it, size_t Val)
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG)
PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for ISD::VECTOR_SHUFFLE.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
separate const offset from Split GEPs to a variadic base and a constant offset for better CSE
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned getNumArgOperands() const
Return the number of call arguments.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
static SDValue PerformADDCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCCombine - Target-specific dag combine transform from ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL or ISD::ADDC, ISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL.
static Constant * get(ArrayRef< Constant * > V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
unsigned getArgRegsSaveSize() const
LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(const T &Value) const
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
std::vector< MachineBasicBlock * >::iterator succ_iterator
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
Reg
All possible values of the reg field in the ModR/M byte.
static cl::opt< bool > EnableConstpoolPromotion("arm-promote-constant", cl::Hidden, cl::desc("Enable / disable promotion of unnamed_addr constants into ""constant pools"), cl::init(true))
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getZeroVector - Returns a vector of specified type with all zero elements.
The memory access is dereferenceable (i.e., doesn't trap).
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
ObjectFormatType getObjectFormat() const
getFormat - Get the object format for this triple.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
bool hasMPExtension() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
int getMaskElt(unsigned Idx) const
static bool isSingletonVEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
bool isPositionIndependent() const
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
isZeroExtended - Check if a node is a vector value that is zero-extended or a constant BUILD_VECTOR w...
static ShiftOpc getShiftOpcForNode(unsigned Opcode)
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
bool isFPBrccSlow() const
Type * getVectorElementType() const
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
Get the call site indexes for a landing pad EH symbol.
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2)
Return the store opcode for a given store size.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_NODISCARD bool empty() const
unsigned getNumOperands() const
Access to explicit operands of the instruction.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
SmallVector< ISD::InputArg, 32 > Ins
AtomicOrdering
Atomic ordering for LLVM's memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
static const unsigned PerfectShuffleTable[6561+1]
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
void RemoveOperand(unsigned i)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
unsigned getLocReg() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
const Triple & getTargetTriple() const
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here...
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
static unsigned createNEONModImm(unsigned OpCmode, unsigned Val)
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool isTargetCOFF() const
SDValue getRegisterMask(const uint32_t *RegMask)
bool hasStructRetAttr() const
Determine if the function returns a structure through first pointer argument.
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
bool isTargetMachO() const
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
static int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
SmallVector< ISD::OutputArg, 32 > Outs
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
unsigned getActiveBits() const
Compute the number of active bits in the value.
CallLoweringInfo & setZExtResult(bool Value=true)
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
bool hasAnyDataBarrier() const
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
const SDValue & getBasePtr() const
MachineConstantPoolValue * getMachineCPVal() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
void setIsSplitCSR(bool s)
An instruction for storing to memory.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
const APInt & getAPIntValue() const
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool isArrayTy() const
True if this is an instance of ArrayType.
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
EVT getMemoryVT() const
Return the type of the in-memory value.
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask)
bool isTargetWatchOS() const
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
bool isThumb1OnlyFunction() const
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG)
static const MCPhysReg GPRArgRegs[]
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
const uint32_t * getSjLjDispatchPreservedMask(const MachineFunction &MF) const
size_t size() const
size - Get the array size.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
Maximum length of the test input libFuzzer tries to guess a good value based on the corpus and reports it always prefer smaller inputs during the corpus shuffle When libFuzzer itself reports a bug this exit code will be used If indicates the maximal total time in seconds to run the fuzzer minimizes the provided crash input Use with etc Experimental Use value profile to guide fuzzing Number of simultaneous worker processes to run the jobs If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset)
Stack pointer relative access.
static SDValue FindBFIToCombineWith(SDNode *N)
Class to represent pointers.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
UNDEF - An undefined node.
This class is used to represent ISD::STORE nodes.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
static bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG)
PerformVMOVDRRCombine - Target-specific dag combine xforms for ARMISD::VMOVDRR.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
const MachineBasicBlock * getParent() const
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
unsigned getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
INITIALIZE_PASS(HexagonEarlyIfConversion,"hexagon-eif","Hexagon early if conversion", false, false) bool HexagonEarlyIfConversion MachineBasicBlock * SB
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned LdSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment load operation with given size.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL) const
Soften the operands of a comparison.
static EVT getExtensionTo64Bits(const EVT &OrigVT)
SDNode * getNode() const
get the SDNode which holds the desired result
The memory access is volatile.
void setReturnRegsCount(unsigned s)
bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
MinAlign - A and B are either alignments or offsets.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getScalarSizeInBits() const
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
A switch()-like statement whose cases are string literals.
Type * getParamType(unsigned i) const
Parameter type accessors.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
initializer< Ty > init(const Ty &Val)
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
static SDValue PerformBFICombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
unsigned getAlignment() const
Return the alignment of the access that is being performed.
bool useSoftFloat() const override
unsigned const MachineRegisterInfo * MRI
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
isSignExtended - Check if a node is a vector value that is sign-extended or a constant BUILD_VECTOR w...
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
APInt trunc(unsigned width) const
Truncate to new width.
constexpr bool isPowerOf2_32(uint32_t Value)
isPowerOf2_32 - This function returns true if the argument is a power of two > 0. ...
An array constant whose element type is a simple 1/2/4/8-byte integer or float/double, and whose elements are just simple data values (i.e.
bool isMachineConstantPoolEntry() const
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) can replace combinations of ...
CodeModel::Model getCodeModel() const
Returns the code model.
MVT - Machine Value Type.
ParmContext
ParmContext - This enum tracks whether calling convention lowering is in the context of prologue or c...
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
LLVM Basic Block Representation.
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, TargetLowering::DAGCombinerInfo &DCI, bool AllOnes=false)
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
static SDValue PerformVDUPCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
C - The default llvm calling convention, compatible with C.
bool isVectorTy() const
True if this is an instance of VectorType.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool supportsTailCall() const
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
bool isOptionalDef() const
Set if this operand is an optional def.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
This is an important base class in LLVM.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator begin()
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG)
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
const Constant * getConstVal() const
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
const MachineOperand & getOperand(unsigned i) const
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
static cl::opt< unsigned > ConstpoolPromotionMaxSize("arm-promote-constant-max-size", cl::Hidden, cl::desc("Maximum size of constant to promote into a constant pool"), cl::init(64))
Carry-using nodes for multiple precision addition and subtraction.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
ConstantFP - Floating Point Values [float, double].
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
static bool isReverseMask(ArrayRef< int > M, EVT VT)
unsigned getInRegsParamsProcessed() const
unsigned getScalarValueSizeInBits() const
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isZeroOrAllOnes(SDValue N, bool AllOnes)
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
void AddToWorklist(SDNode *N)
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
static mvt_range fp_valuetypes()
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC)
IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
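The conversion is a mechanical switch; the sketch below shows the usual shape of such a mapping (a plausible reconstruction, not necessarily the verbatim body in this file):

  static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
    switch (CC) {
    default: llvm_unreachable("Unknown integer condition code!");
    case ISD::SETEQ:  return ARMCC::EQ;
    case ISD::SETNE:  return ARMCC::NE;
    case ISD::SETGT:  return ARMCC::GT;  // signed comparisons
    case ISD::SETGE:  return ARMCC::GE;
    case ISD::SETLT:  return ARMCC::LT;
    case ISD::SETLE:  return ARMCC::LE;
    case ISD::SETUGT: return ARMCC::HI;  // unsigned comparisons
    case ISD::SETUGE: return ARMCC::HS;
    case ISD::SETULT: return ARMCC::LO;
    case ISD::SETULE: return ARMCC::LS;
    }
  }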
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B)
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
constexpr bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two > 0 (64-bit edition).
static Type * getVoidTy(LLVMContext &C)
This class provides iterator support for SDUse operands that use a specific SDNode.
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, SelectionDAG &DAG)
bool isBeforeLegalize() const
static EVT getFloatingPointVT(unsigned BitWidth)
getFloatingPointVT - Returns the EVT that represents a floating point type with the given number of b...
unsigned getBitWidth() const
Return the number of bits in the APInt.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
unsigned getOpcode() const
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
TRAP - Trapping instruction.
Thread Local Storage (General Dynamic Mode)
bool isTargetWatchABI() const
Value * getOperand(unsigned i) const
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, SDValue &RetVal1, SDValue &RetVal2)
Value * getPointerOperand()
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
static mvt_range vector_valuetypes()
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM...
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic...
static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG)
getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count for each 16-bit element fr...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool hasVMLxForwarding() const
self_iterator getIterator()
The memory access is non-temporal.
Class to represent integer types.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
unsigned countPopulation() const
Count the number of bits set.
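A small APInt illustration (the value is arbitrary):

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  void demoCountPopulation() {
    llvm::APInt V(16, 0xF0F0);         // 1111 0000 1111 0000
    assert(V.countPopulation() == 8);  // eight set bits
  }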
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
static void ReplaceCMP_SWAP_64Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static SDValue PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
bool useNEONForSinglePrecisionFP() const
bool empty() const
empty - Check if the array is empty.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
bool hasV8MBaselineOps() const
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const SDValue & getValue() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
Bit counting operators with an undefined result for zero inputs.
static SDValue PerformVDUPLANECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformVDUPLANECombine - Target-specific dag combine xforms for ARMISD::VDUPLANE. ...
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
SmallPtrSet< const GlobalVariable *, 2 > & getGlobalsPromotedToConstantPool()
succ_iterator succ_begin()
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
EVT - Extended Value Type.
static SDValue PerformORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformORCombine - Target-specific dag combine xforms for ISD::OR.
bool isIntN(unsigned N, int64_t x)
isIntN - Checks if a signed integer fits into the given (dynamic) bit width.
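For example, the signed 8-bit range is [-128, 127]:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  void demoIsIntN() {
    assert(llvm::isIntN(8, 127));   // fits in i8
    assert(llvm::isIntN(8, -128));  // still fits
    assert(!llvm::isIntN(8, 128));  // one past the signed maximum
  }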
bool isPointerTy() const
True if this is an instance of PointerType.
std::vector< ArgListEntry > ArgListTy
const APFloat & getValueAPF() const
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
bool hasSinCos() const
This function returns true if the target has sincos() routine in its compiler runtime or math librari...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
This structure contains all information that is necessary for lowering calls.
bool shouldAssumeDSOLocal(const Module &M, const GlobalValue *GV) const
static bool isLTorLE(ISD::CondCode CC)
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
bool hasRetAddrStack() const
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
This class contains a discriminated union of information about pointers in memory operands...
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, SelectionDAG &DAG)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
std::pair< unsigned, const TargetRegisterClass * > RCPair
static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG)
lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the bit-count for each 16-bit eleme...
static Constant * getSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumElts)
Get a mask consisting of sequential integers starting from Start.
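A minimal sketch of what such a mask builder can look like, given the IRBuilder-based signature above (a plausible reconstruction rather than the verbatim body):

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/IRBuilder.h"

  static llvm::Constant *getSequentialMask(llvm::IRBuilder<> &Builder,
                                           unsigned Start, unsigned NumElts) {
    llvm::SmallVector<llvm::Constant *, 16> Mask;
    for (unsigned i = 0; i != NumElts; ++i)
      Mask.push_back(Builder.getInt32(Start + i));  // Start, Start+1, ...
    return llvm::ConstantVector::get(Mask);
  }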
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
const SDValue & getRoot() const
Return the root tag of the SelectionDAG.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
bool genExecuteOnly() const
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
Triple - Helper class for working with autoconf configuration names.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
static bool isReleaseOrStronger(AtomicOrdering ao)
const MachinePointerInfo & getPointerInfo() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero...
unsigned getByValAlign() const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
The memory access writes data.
static const int BlockSize
void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters, appending the result fragments to the output list.
bool bitsGT(EVT VT) const
bitsGT - Return true if this has more bits than VT.
bool genLongCalls() const
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
ArrayRef< int > getMask() const
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformShiftCombine - Checks for immediate versions of vector shifts and lowers them.
static AddrOpc getAM2Op(unsigned AM2Opc)
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
bool hasExternalWeakLinkage() const
static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG, EVT PtrVT, SDLoc dl)
TokenFactor - This node takes multiple tokens as input and produces a single token result...
bool hasDLLImportStorageClass() const
static const MachineInstrBuilder & AddDefaultCC(const MachineInstrBuilder &MIB)
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static SDValue AddCombineTo64bitUMAAL(SDNode *AddcNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
Iterator for intrusive lists based on ilist_node.
void setPromotedConstpoolIncrease(int Sz)
CCState - This class holds information needed while lowering arguments and return values...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
auto find(R &&Range, const T &Val) -> decltype(std::begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
static bool isStrongerThanMonotonic(AtomicOrdering ao)
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static unsigned getAM2Offset(unsigned AM2Opc)
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not have a module.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
unsigned logBase2() const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned char TargetFlags=0) const
MachineOperand class - Representation of each machine instruction operand.
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
static bool isReadOnly(const GlobalValue *GV)
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total value size to 64 bits...
Type * getType() const
All values are typed, get the type of this value.
const InstrItineraryData * getInstrItineraryData() const override
getInstrItins - Return the instruction itineraries based on subtarget selection.
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
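For instance:

  #include "llvm/ADT/STLExtras.h"
  #include <cassert>

  void demoArrayLengthof() {
    static const int Vals[] = {1, 2, 3};
    assert(llvm::array_lengthof(Vals) == 3);  // deduced from the array type at compile time
  }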
static void ReplaceREADCYCLECOUNTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
BRCOND - Conditional branch.
SDNode * getGluedUser() const
If this node has a glue value with a user, return the user (there is at most one).
const SDValue & getChain() const
Byte Swap and Counting operators.
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
LLVM_NODISCARD T pop_back_val()
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
This is an abstract virtual class for memory operations.
BasicBlock * GetInsertBlock() const
bool isTargetAndroid() const
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
CallLoweringInfo & setSExtResult(bool Value=true)
void dump(const TargetInstrInfo *TII=nullptr) const
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
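This matches the splat overload of ConstantInt::get; a short illustration (names are local to the example):

  #include "llvm/IR/Constants.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"

  llvm::Constant *makeSplatOfOne(llvm::LLVMContext &Ctx) {
    llvm::Type *I32 = llvm::Type::getInt32Ty(Ctx);
    llvm::VectorType *V4I32 = llvm::VectorType::get(I32, 4);
    // Scalar type -> plain ConstantInt; vector type -> <4 x i32> <i32 1, i32 1, i32 1, i32 1>.
    return llvm::ConstantInt::get(V4I32, 1);
  }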
Function * getCalledFunction() const
Return the function called, or null if this is an indirect function invocation.
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
Represents one node in the SelectionDAG.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
const MachineInstrBuilder & addFrameIndex(int Idx) const
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, bool &swpCmpOps, bool &swpVselOps)
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
void setAdjustsStack(bool V)
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG)
ExpandBITCAST - If the target supports VFP, this function is called to expand a bit convert where eit...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
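For example:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  void demoLog2_32() {
    assert(llvm::Log2_32(32) == 5);            // exact power of two
    assert(llvm::Log2_32(33) == 5);            // floor behavior
    assert(llvm::Log2_32(0) == (unsigned)-1);  // zero maps to -1
  }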
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
virtual unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, const SDLoc &DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
static unsigned isNEONTwoResultShuffleMask(ArrayRef< int > ShuffleMask, EVT VT, unsigned &WhichResult, bool &isV_UNDEF)
Check if ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), and return the corresponding AR...
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to...
Class to represent vector types.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SynchronizationScope SynchScope=CrossThread, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Class for arbitrary precision integers.
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
op_iterator op_begin() const
bool isIntegerTy() const
True if this is an instance of IntegerType.
unsigned EmulatedTLS
EmulatedTLS - This flag enables emulated TLS model, using emutls function in the runtime library...
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
static use_iterator use_end()
iterator_range< user_iterator > users()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
const GlobalObject * getBaseObject() const
unsigned getVectorNumElements() const
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ANY_EXTEND - Used for integer types. The high bits are undefined.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
FMINNAN/FMAXNAN - Behave identically to FMINNUM/FMAXNUM, except that when a single input is NaN...
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
bool isTargetGNUAEABI() const
static int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into a shifter_operand immed...
void setArgumentStackSize(unsigned size)
bool isOSVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
isOSVersionLT - Helper function for doing comparisons against version numbers included in the target ...
static SDValue PerformXORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static bool isVTBLMask(ArrayRef< int > M, EVT VT)
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
static bool isAcquireOrStronger(AtomicOrdering ao)
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) can replace combinations of ...
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
CallLoweringInfo & setTailCall(bool Value=true)
const TargetRegisterClass * getRegClassFor(MVT VT) const override
getRegClassFor - Return the register class that should be used for the specified value type...
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
Section Relative (Windows TLS)
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
BR_JT - Jumptable branch.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isAllOnesValue() const
Determine if all bits are set.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static CondCodes getOppositeCondition(CondCodes CC)
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static bool canChangeToInt(SDValue Op, bool &SeenZero, const ARMSubtarget *Subtarget)
canChangeToInt - Given the fp compare operand, return true if it is suitable to morph to an integer c...
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
void setVarArgsFrameIndex(int Index)
SmallVector< SDValue, 32 > OutVals
unsigned getSchedClass() const
Return the scheduling class for this instruction.
static SDValue PerformVMOVRRDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMOVRRDCombine - Target-specific dag combine xforms for ARMISD::VMOVRRD.
static bool LowerToByteSwap(CallInst *CI)
LowerToByteSwap - Replace a call instruction into a call to bswap intrinsic.
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Bitwise operators - logical and, logical or, logical xor.
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
static bool isVUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of "vector_shuffle v...
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, MachineInstr &MI, const SDNode *Node)
Attaches vregs to MEMCPY that it will use as scratch registers when it is expanded into LDM/STM...
static SDValue LowerInterruptReturn(SmallVectorImpl< SDValue > &RetOps, const SDLoc &DL, SelectionDAG &DAG)
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
unsigned getAlignment() const
Return the alignment of the access that is being performed.
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
static bool isLegalT2AddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
static bool isLegalAddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
isLegalAddressImmediate - Return true if the integer value can be used as the offset of the target ad...
void ReplaceAllUsesWith(SDValue From, SDValue Op)
Modify anything using 'From' to use 'To' instead.
void setReg(unsigned Reg)
Change the register this operand corresponds to.
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned char TargetFlags=0) const
void setArgRegsSaveSize(unsigned s)
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
bool isOSMSVCRT() const
Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned StSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment store operation with given size.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
const ARMBaseRegisterInfo * getRegisterInfo() const override
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, ARMCC::CondCodes &CondCode2)
FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
The memory access always returns the same value (or traps).
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
static cl::opt< unsigned > ConstpoolPromotionMaxTotal("arm-promote-constant-max-total", cl::Hidden, cl::desc("Maximum size of ALL constants to promote into a constant pool"), cl::init(128))
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
bool allowsUnalignedMem() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
VectorType * getType() const
Overload to return most specific vector type.
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG)
FSINCOS - Compute both fsin and fcos as a single operation.
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
constexpr bool isUInt< 16 >(uint64_t x)
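For instance:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  void demoIsUInt16() {
    assert(llvm::isUInt<16>(65535));   // the largest value that fits in 16 bits
    assert(!llvm::isUInt<16>(65536));  // one too large
  }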
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns true if the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass into a ...
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power of 2, return the log base 2 integer value.
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
void removeSuccessor(MachineBasicBlock *Succ, bool NormalizeSuccProbs=false)
Remove successor from the successors list of this MachineBasicBlock.
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
EVT getValueType() const
Return the ValueType of the referenced return value.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool useMovt(const MachineFunction &MF) const
SDValue getCondCode(ISD::CondCode Cond)
EnvironmentType getEnvironment() const
getEnvironment - Get the parsed environment type of this triple.
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a carry value...
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
bool is128BitVector() const
is128BitVector - Return true if this is a 128-bit vector type.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static bool isGTorGE(ISD::CondCode CC)
static SDValue PerformVLDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
unsigned getReg() const
getReg - Returns the register number.
StringRef getValueAsString() const
Return the attribute's value as a string.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
static RTLIB::Libcall getDivRemLibcall(const SDNode *N, MVT::SimpleValueType SVT)
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
bool isTargetMuslAEABI() const
static bool isVTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of "vector_shuffle v...
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC, bool &Invert, SDValue &OtherOp, SelectionDAG &DAG)
void insert(iterator MBBI, MachineBasicBlock *MBB)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
const unsigned char * bytes_begin() const
unsigned getAlignment() const
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isShuffleMaskLegal(const SmallVectorImpl< int > &M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
CallLoweringInfo & setInRegister(bool Value=true)
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
static bool isSaturatingConditional(const SDValue &Op, SDValue &V, uint64_t &K)
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
int getVarArgsFrameIndex() const
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
Instruction * makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct an VectorType.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2)
Return the load opcode for a given load size.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isTruncatingStore() const
Return true if the op does a truncation before store.
void push_back(MachineBasicBlock *MBB)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
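As a usage sketch, inside a lowering routine that already has a SelectionDAG &DAG and an SDLoc dl in scope (both assumed here, as are the SDValues A and B):

  // Hypothetical: select the larger of two integer values of the same type.
  SDValue Max = DAG.getSelectCC(dl, A, B, /*True=*/A, /*False=*/B, ISD::SETGT);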
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
SDValue getValueType(EVT)
const MCOperandInfo * OpInfo
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
bool isWindowsItaniumEnvironment() const
PREFETCH - This corresponds to a prefetch intrinsic.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which...
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target...
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples: i1 -> 1, i8 -> 8, i19 -> 19, i32 -> 32, i100 -> 100, float -> 32, double -> 64, x86_fp80 -> 80 (bits).
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
const TargetLowering & getTargetLoweringInfo() const
unsigned getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
void rewindByValRegsInfo()
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
bool preferISHSTBarriers() const
unsigned countLeadingZeros() const
The APInt version of the countLeadingZeros functions in MathExtras.h.
Primary interface to the complete machine description for the target machine.
bool hasDataBarrier() const
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
static SDValue PerformSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSTORECombine - Target-specific dag combine xforms for ISD::STORE.
bool hasDivideInARMMode() const
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG)
constexpr bool isShiftedMask_32(uint32_t Value)
isShiftedMask_32 - This function returns true if the argument contains a non-empty sequence of ones w...
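For example:

  #include "llvm/Support/MathExtras.h"
  #include <cassert>

  void demoIsShiftedMask() {
    assert(llvm::isShiftedMask_32(0x0FF0));   // a contiguous run of ones, shifted left
    assert(!llvm::isShiftedMask_32(0x0FF1));  // the run is broken by a stray low bit
    assert(!llvm::isShiftedMask_32(0));       // the sequence must be non-empty
  }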
StringRef - Represent a constant reference to a string, i.e.
SetCC operator - This evaluates to a true value iff the condition is true.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
static MachineBasicBlock * OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ)
static BranchProbability getZero()
static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget, SelectionDAG &DAG)
bool operator==(uint64_t V1, const APInt &V2)
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g. ARM instructions which can set condition code if 's' bit is set.
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getLocMemOffset() const
MVT getVectorElementType() const
static bool isVolatile(Instruction *Inst)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG)
TRUNCATE - Completely drop the high bits.
bool isUIntN(unsigned N, uint64_t x)
isUIntN - Checks if an unsigned integer fits into the given (dynamic) bit width.
unsigned getAlignment() const
bool isBitFieldInvertedMask(unsigned v)
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
static void getShuffleMask(Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
static SDValue AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
static bool allUsersAreInFunctions(const Value *V)
Return true if all users of V are within some (any) function, looking through ConstantExprs.
bool is64BitVector() const
is64BitVector - Return true if this is a 64-bit vector type.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
bool isEmpty() const
Returns true if there are no itineraries.
Value * getPointerOperand()
ARM_AAPCS - ARM Architecture Procedure Calling Standard calling convention (aka EABI).
Fast - This calling convention attempts to make calls as fast as possible (e.g.
MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num)
allocateMemRefsArray - Allocate an array to hold MachineMemOperand pointers.
static IntegerType * getInt8Ty(LLVMContext &C)
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
bool isTargetWindows() const
ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
const BasicBlock * getParent() const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
static bool isSplatMask(const int *Mask, EVT VT)
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one...
static bool isVEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseVEXT, unsigned &Imm)
EVT changeVectorElementTypeToInteger() const
changeVectorElementTypeToInteger - Return a vector with the same number of elements as this vector...
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
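A plausible reconstruction of this small predicate from its parameter names, treating an alignment of 0 as unknown/don't-care (an assumption):

  static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                         unsigned AlignCheck) {
    return (DstAlign == 0 || DstAlign % AlignCheck == 0) &&
           (SrcAlign == 0 || SrcAlign % AlignCheck == 0);
  }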
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
const uint32_t * getTLSCallPreservedMask(const MachineFunction &MF) const
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG)
Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
auto count_if(R &&Range, UnaryPredicate P) -> typename std::iterator_traits< decltype(std::begin(Range))>::difference_type
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
static const MachineInstrBuilder & AddDefaultT1CC(const MachineInstrBuilder &MIB, bool isDead=false)
CARRY_FALSE - This node is used when folding other nodes, like ADDC/SUBC, which indicate the carry re...
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
LLVMContext & getContext() const
Get the global data context.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
FloatABI::ABIType FloatABIType
FloatABIType - This setting is set when the -float-abi=xxx option is specified on the command line...
uint64_t getZExtValue() const
static uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits)
decodeNEONModImm - Decode a NEON modified immediate value into the element value and the element size...
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
static TargetLowering::ArgListTy getDivRemArgList(const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget)
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, unsigned SplatBitSize, SelectionDAG &DAG, const SDLoc &dl, EVT &VT, bool is128Bits, NEONModImmType type)
isNEONModifiedImm - Check if the specified splat value corresponds to a valid vector constant for a N...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
This class is used to represent ISD::LOAD nodes.
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V)
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...