57 #define DEBUG_TYPE "arm-isel"
59 STATISTIC(NumTailCalls,
"Number of tail calls");
60 STATISTIC(NumMovwMovt,
"Number of GAs materialized with movw + movt");
61 STATISTIC(NumLoopByVals,
"Number of loops generated for byval arguments");
65 cl::desc(
"Enable / disable ARM interworking (for debugging only)"),
69 class ARMCCState :
public CCState {
74 :
CCState(CC, isVarArg, MF, locs, C) {
76 "ARMCCState users must specify whether their context is call"
77 "or prologue generation.");
85 ARM::R0, ARM::R1,
ARM::R2, ARM::R3
88 void ARMTargetLowering::addTypeForNEON(
MVT VT,
MVT PromotedLdStVT,
89 MVT PromotedBitwiseVT) {
90 if (VT != PromotedLdStVT) {
129 if (VT.
isInteger() && VT != PromotedBitwiseVT) {
147 void ARMTargetLowering::addDRTypeForNEON(
MVT VT) {
152 void ARMTargetLowering::addQRTypeForNEON(
MVT VT) {
250 static const struct {
252 const char *
const Name;
345 for (
const auto &LC : LibraryCalls) {
354 static const struct {
356 const char *
const Name;
369 for (
const auto &LC : LibraryCalls) {
992 std::pair<const TargetRegisterClass *, uint8_t>
1005 RRC = &ARM::DPRRegClass;
1015 RRC = &ARM::DPRRegClass;
1019 RRC = &ARM::DPRRegClass;
1023 RRC = &ARM::DPRRegClass;
1027 return std::make_pair(RRC, Cost);
1183 return &ARM::QQPRRegClass;
1185 return &ARM::QQQQPRRegClass;
1194 unsigned &PrefAlign)
const {
1195 if (!isa<MemIntrinsic>(CI))
1216 for (
unsigned i = 0; i != NumVals; ++i) {
1295 #include "ARMGenCallingConv.inc"
1302 bool isVarArg)
const {
1337 bool isVarArg)
const {
1338 switch (getEffectiveCallingConv(CC, isVarArg)) {
1342 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1344 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1346 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1348 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1350 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1357 ARMTargetLowering::LowerCallResult(
SDValue Chain,
SDValue InFlag,
1362 bool isThisReturn,
SDValue ThisVal)
const {
1368 CCInfo.AnalyzeCallResult(Ins,
1369 CCAssignFnForNode(CallConv,
true,
1373 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
1378 if (i == 0 && isThisReturn) {
1380 "unexpected return calling convention register assignment");
1443 ARMTargetLowering::LowerMemOpCallTo(
SDValue Chain,
1452 return DAG.
getStore(Chain, dl, Arg, PtrOff,
1459 RegsToPassVector &RegsToPass,
1467 unsigned id = Subtarget->
isLittle() ? 0 : 1;
1503 bool isStructRet = (Outs.
empty()) ?
false : Outs[0].Flags.
isSRet();
1504 bool isThisReturn =
false;
1505 bool isSibCall =
false;
1514 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1516 Outs, OutVals,
Ins, DAG);
1519 "site marked musttail");
1532 CCInfo.AnalyzeCallOperands(Outs,
1533 CCAssignFnForNode(CallConv,
false,
1537 unsigned NumBytes = CCInfo.getNextStackOffset();
1552 RegsToPassVector RegsToPass;
1557 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
1559 ++i, ++realArgIdx) {
1561 SDValue Arg = OutVals[realArgIdx];
1563 bool isByVal = Flags.
isByVal();
1591 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1592 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1596 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1597 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1601 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1602 dl, DAG, VA, Flags));
1605 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1606 StackPtr, MemOpChains, Flags);
1611 "unexpected calling convention register assignment");
1612 assert(!Ins.empty() && Ins[0].VT ==
MVT::i32 &&
1613 "unexpected use of 'returned'");
1614 isThisReturn =
true;
1616 RegsToPass.push_back(std::make_pair(VA.
getLocReg(), Arg));
1617 }
else if (isByVal) {
1619 unsigned offset = 0;
1623 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1624 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1626 if (CurByValIdx < ByValArgsCount) {
1628 unsigned RegBegin, RegEnd;
1629 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1634 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1639 false,
false,
false,
1642 RegsToPass.push_back(std::make_pair(j, Load));
1647 offset = RegEnd - RegBegin;
1649 CCInfo.nextInRegsParam();
1665 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1669 }
else if (!isSibCall) {
1672 MemOpChains.
push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1673 dl, DAG, VA, Flags));
1677 if (!MemOpChains.
empty())
1686 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1687 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
1688 RegsToPass[i].second, InFlag);
1703 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1704 Chain = DAG.
getCopyToReg(Chain, dl, RegsToPass[i].first,
1705 RegsToPass[i].second, InFlag);
1714 bool isDirect =
false;
1715 bool isARMFunc =
false;
1716 bool isLocalARMFunc =
false;
1723 "long-calls with non-static relocation model!");
1741 const char *Sym = S->getSymbol();
1747 ARMPCLabelIndex, 0);
1761 isARMFunc = !Subtarget->
isThumb() || (isStub && !Subtarget->
isMClass());
1766 assert(Subtarget->
isTargetMachO() &&
"WrapperPIC use on non-MachO?");
1774 "Windows is the only supported COFF target");
1787 unsigned OpFlags = 0;
1797 isARMFunc = !Subtarget->
isThumb() || (isStub && !Subtarget->
isMClass());
1799 const char *Sym = S->getSymbol();
1804 ARMPCLabelIndex, 4);
1813 unsigned OpFlags = 0;
1826 if ((!isDirect || isARMFunc) && !Subtarget->
hasV5TOps())
1831 if (!isDirect && !Subtarget->
hasV5TOps())
1833 else if (doesNotRet && isDirect && Subtarget->
hasRAS() &&
1842 std::vector<SDValue> Ops;
1843 Ops.push_back(Chain);
1844 Ops.push_back(Callee);
1848 for (
unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1849 Ops.push_back(DAG.
getRegister(RegsToPass[i].first,
1850 RegsToPass[i].second.getValueType()));
1854 const uint32_t *Mask;
1863 isThisReturn =
false;
1869 assert(Mask &&
"Missing call preserved mask for calling convention");
1874 Ops.push_back(InFlag);
1883 Chain = DAG.
getNode(CallOpc, dl, NodeTys, Ops);
1893 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
1894 InVals, isThisReturn,
1895 isThisReturn ? OutVals[0] :
SDValue());
1902 void ARMTargetLowering::HandleByVal(
CCState *State,
unsigned &Size,
1903 unsigned Align)
const {
1906 "unhandled ParmContext");
1909 Align = std::max(Align, 4U);
1915 unsigned AlignInRegs = Align / 4;
1916 unsigned Waste = (
ARM::R4 -
Reg) % AlignInRegs;
1917 for (
unsigned i = 0; i < Waste; ++i)
1930 if (NSAAOffset != 0 && Size > Excess) {
1942 unsigned ByValRegBegin =
Reg;
1943 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4,
ARM::R4);
1947 for (
unsigned i = Reg + 1; i != ByValRegEnd; ++i)
1953 Size = std::max<int>(Size - Excess, 0);
1978 }
else if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
1986 SDValue Ptr = Ld->getBasePtr();
1994 assert(FI != INT_MAX);
2004 ARMTargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee,
2007 bool isCalleeStructRet,
2008 bool isCallerStructRet,
2015 bool CCMatch = CallerCC == CalleeCC;
2022 if (isVarArg && !Outs.
empty())
2033 if (isCalleeStructRet || isCallerStructRet)
2077 CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC,
true, isVarArg));
2082 CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC,
true, isVarArg));
2084 if (RVLocs1.
size() != RVLocs2.
size())
2086 for (
unsigned i = 0, e = RVLocs1.
size(); i != e; ++i) {
2087 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
2089 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
2091 if (RVLocs1[i].isRegLoc()) {
2092 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
2095 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
2105 getInfo<ARMFunctionInfo>();
2111 if (!Outs.
empty()) {
2117 CCInfo.AnalyzeCallOperands(Outs,
2118 CCAssignFnForNode(CalleeCC,
false, isVarArg));
2119 if (CCInfo.getNextStackOffset()) {
2127 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size();
2129 ++i, ++realArgIdx) {
2132 SDValue Arg = OutVals[realArgIdx];
2143 if (!ArgLocs[++i].isRegLoc())
2146 if (!ArgLocs[++i].isRegLoc())
2148 if (!ArgLocs[++i].isRegLoc())
2169 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2170 return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv,
true,
2192 if (IntKind ==
"" || IntKind ==
"IRQ" || IntKind ==
"FIQ" ||
2195 else if (IntKind ==
"SWI" || IntKind ==
"UNDEF")
2199 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2208 ARMTargetLowering::LowerReturn(
SDValue Chain,
2222 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv,
true,
2228 bool isLittleEndian = Subtarget->
isLittle();
2235 for (
unsigned i = 0, realRVLocIdx = 0;
2237 ++i, ++realRVLocIdx) {
2239 assert(VA.
isRegLoc() &&
"Can only return in registers!");
2241 SDValue Arg = OutVals[realRVLocIdx];
2260 HalfGPRs.
getValue(isLittleEndian ? 0 : 1),
2266 HalfGPRs.
getValue(isLittleEndian ? 1 : 0),
2281 fmrrd.
getValue(isLittleEndian ? 0 : 1),
2287 fmrrd.
getValue(isLittleEndian ? 1 : 0),
2319 bool ARMTargetLowering::isUsedByReturnOnly(
SDNode *
N,
SDValue &Chain)
const {
2343 if (Copies.
size() > 2)
2378 bool HasRet =
false;
2394 bool ARMTargetLowering::mayBeEmittedAsTailCall(
CallInst *CI)
const {
2400 if (!CI->
isTailCall() || Attr.getValueAsString() ==
"true")
2414 &&
"LowerWRITE_REGISTER called for non-i64 type argument.");
2453 unsigned ARMPCLabelIndex = 0;
2456 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2462 unsigned PCAdj = Subtarget->
isThumb() ? 4 : 8;
2472 false,
false,
false, 0);
2485 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
2496 false,
false,
false, 0);
2505 Entry.Node = Argument;
2507 Args.push_back(Entry);
2516 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
2517 return CallResult.first;
2539 unsigned char PCAdj = Subtarget->
isThumb() ? 4 : 8;
2546 Offset = DAG.
getLoad(PtrVT, dl, Chain, Offset,
2548 false,
false,
false, 0);
2554 Offset = DAG.
getLoad(PtrVT, dl, Chain, Offset,
2556 false,
false,
false, 0);
2564 Offset = DAG.
getLoad(PtrVT, dl, Chain, Offset,
2566 false,
false,
false, 0);
2578 "TLS not implemented for non-ELF targets");
2586 return LowerToTLSGeneralDynamicModel(GA, DAG);
2589 return LowerToTLSExecModels(GA, DAG, model);
2598 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2609 false,
false,
false, 0);
2614 Result = DAG.
getLoad(PtrVT, dl, Chain, Result,
2616 false,
false,
false, 0);
2633 false,
false,
false, 0);
2641 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2663 assert(Subtarget->
isTargetWindows() &&
"non-Windows COFF is not supported");
2665 "Windows on ARM expects to use movw/movt");
2667 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
2690 "GLOBAL OFFSET TABLE not implemented for non-ELF targets");
2696 unsigned PCAdj = Subtarget->
isThumb() ? 4 : 8;
2699 ARMPCLabelIndex, PCAdj);
2704 false,
false,
false, 0);
2728 unsigned IntNo = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
2732 case Intrinsic::arm_rbit: {
2734 "RBIT intrinsic must have i32 type!");
2737 case Intrinsic::arm_thread_pointer: {
2741 case Intrinsic::eh_sjlj_lsda: {
2749 ? 0 : (Subtarget->
isThumb() ? 4 : 8);
2758 false,
false,
false, 0);
2766 case Intrinsic::arm_neon_vmulls:
2767 case Intrinsic::arm_neon_vmullu: {
2768 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
2785 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
2817 unsigned isRead = ~cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue() & 1;
2823 unsigned isData = cast<ConstantSDNode>(Op.
getOperand(4))->getZExtValue();
2826 isRead = ~isRead & 1;
2827 isData = ~isData & 1;
2858 RC = &ARM::tGPRRegClass;
2860 RC = &ARM::GPRRegClass;
2875 false,
false,
false, 0);
2896 const Value *OrigArg,
2897 unsigned InRegsParamRecordIdx,
2899 unsigned ArgSize)
const {
2914 unsigned RBegin, REnd;
2924 ArgOffset = -4 * (
ARM::R4 - RBegin);
2934 for (
unsigned Reg = RBegin, i = 0; Reg < REnd; ++
Reg, ++i) {
2944 if (!MemOps.
empty())
2954 unsigned TotalArgRegsSaveSize,
2955 bool ForceMutable)
const {
2964 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain,
nullptr,
2971 ARMTargetLowering::LowerFormalArguments(
SDValue Chain,
2988 CCAssignFnForNode(CallConv,
false,
2994 unsigned CurArgIdx = 0;
3006 unsigned ArgRegBegin =
ARM::R4;
3007 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3017 assert(VA.
isMemLoc() &&
"unexpected byval pointer in reg");
3018 unsigned RBegin, REnd;
3020 ArgRegBegin =
std::min(ArgRegBegin, RBegin);
3026 int lastInsIndex = -1;
3033 unsigned TotalArgRegsSaveSize = 4 * (
ARM::R4 - ArgRegBegin);
3037 for (
unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3039 if (Ins[VA.
getValNo()].isOrigArg()) {
3041 Ins[VA.
getValNo()].getOrigArgIndex() - CurArgIdx);
3042 CurArgIdx = Ins[VA.
getValNo()].getOrigArgIndex();
3052 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3061 false,
false,
false, 0);
3063 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3068 ArgValue, ArgValue1,
3071 ArgValue, ArgValue2,
3074 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3080 RC = &ARM::SPRRegClass;
3082 RC = &ARM::DPRRegClass;
3084 RC = &ARM::QPRRegClass;
3087 : &ARM::GPRRegClass;
3129 if (index != lastInsIndex)
3138 assert(Ins[index].isOrigArg() &&
3139 "Byval arguments cannot be implicit");
3142 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, CurOrigArg,
3156 false,
false,
false, 0));
3158 lastInsIndex = index;
3165 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3167 TotalArgRegsSaveSize);
3177 return CFP->getValueAPF().isPosZero();
3183 if (
const ConstantFP *CFP = dyn_cast<ConstantFP>(
CP->getConstVal()))
3184 return CFP->getValueAPF().isPosZero();
3194 cast<ConstantSDNode>(MoveOp)->getZExtValue() == 0) {
3209 unsigned C = RHSC->getZExtValue();
3284 assert(Opc ==
ARMISD::FMSTAT &&
"unexpected comparison operation");
3296 std::pair<SDValue, SDValue>
3336 return std::make_pair(Value, OverflowCmp);
3348 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
3357 ARMcc, CCR, OverflowCmp);
3379 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
3383 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
3398 if (CMOVTrue && CMOVFalse) {
3400 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
3404 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
3406 False = SelectFalse;
3407 }
else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
3418 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
3434 bool &swpCmpOps,
bool &swpVselOps) {
3462 swpCmpOps = !swpCmpOps;
3463 swpVselOps = !swpVselOps;
3499 ARMcc, CCR, duplicateCmp(Cmp, DAG));
3552 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
3553 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
3567 bool swapSides =
false;
3589 swapSides = swapSides || (LHS == FalseVal && RHS == TrueVal);
3594 if (LHS == TrueVal && RHS == FalseVal) {
3595 bool canTransform =
true;
3669 bool swpCmpOps =
false;
3670 bool swpVselOps =
false;
3683 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
3685 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
3689 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
3690 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
3722 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
3724 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
3725 Ld->isVolatile(), Ld->isNonTemporal(),
3726 Ld->isInvariant(), Ld->getAlignment());
3741 if (
LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
3742 SDValue Ptr = Ld->getBasePtr();
3744 Ld->getChain(), Ptr,
3745 Ld->getPointerInfo(),
3746 Ld->isVolatile(), Ld->isNonTemporal(),
3747 Ld->isInvariant(), Ld->getAlignment());
3750 unsigned NewAlign =
MinAlign(Ld->getAlignment(), 4);
3754 Ld->getChain(), NewPtr,
3755 Ld->getPointerInfo().getWithOffset(4),
3756 Ld->isVolatile(), Ld->isNonTemporal(),
3757 Ld->isInvariant(), NewAlign);
3775 bool LHSSeenZero =
false;
3777 bool RHSSeenZero =
false;
3779 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
3795 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
3798 Chain, Dest, ARMcc, CCR, Cmp);
3810 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
3839 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
3842 Chain, Dest, ARMcc, CCR, Cmp);
3850 SDValue Result = OptimizeVFPBrcond(Op, DAG);
3859 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
3862 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
3895 false,
false,
false, 0);
3900 Addr = DAG.
getLoad(PTy, dl, Chain, Addr,
3902 false,
false,
false, 0);
3919 "Invalid type for custom lowering!");
3940 false,
SDLoc(Op)).first;
3957 "Invalid type for custom lowering!");
3976 return DAG.
getNode(Opc, dl, VT, Op);
3992 false,
SDLoc(Op)).first;
4007 bool UseNEON = !InGPR && Subtarget->
hasNEON();
4090 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4092 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4113 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4119 false,
false,
false, 0);
4125 unsigned ARMTargetLowering::getRegisterByName(
const char* RegName,
EVT VT,
4128 .Case(
"sp", ARM::SP)
4144 &&
"ExpandREAD_REGISTER called for non-i64 type result.");
4171 "ExpandBITCAST called for non-i64 type");
4208 assert(VT.
isVector() &&
"Expected a vector type");
4249 return DAG.getMergeValues(Ops, dl);
4258 unsigned VTBits = VT.getSizeInBits();
4283 return DAG.getMergeValues(Ops, dl);
4326 if ((ElemTy ==
MVT::i16 || ElemTy == MVT::i32) &&
4373 if (ElemTy == MVT::i32)
4487 assert(ST->
hasNEON() &&
"Custom ctpop lowering requires NEON.");
4490 "Unexpected type for custom ctpop lowering");
4507 assert(ST->
hasNEON() &&
"unexpected vector shift");
4527 Intrinsic::arm_neon_vshifts :
4528 Intrinsic::arm_neon_vshiftu);
4544 "Unknown shift to lower!");
4547 if (!isa<ConstantSDNode>(N->
getOperand(1)) ||
4548 cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue() != 1)
4574 bool Invert =
false;
4583 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->
get();
4593 switch (SetCCOpcode) {
4632 switch (SetCCOpcode) {
4698 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
4701 Result = DAG.
getNode(Opc, dl, CmpVT, Op0, Op1);
4707 Result = DAG.
getNOT(dl, Result, VT);
4719 unsigned OpCmode, Imm;
4729 switch (SplatBitSize) {
4734 assert((SplatBits & ~0xff) == 0 &&
"one byte splat value is too big");
4743 if ((SplatBits & ~0xff) == 0) {
4749 if ((SplatBits & ~0xff00) == 0) {
4752 Imm = SplatBits >> 8;
4763 if ((SplatBits & ~0xff) == 0) {
4769 if ((SplatBits & ~0xff00) == 0) {
4772 Imm = SplatBits >> 8;
4775 if ((SplatBits & ~0xff0000) == 0) {
4778 Imm = SplatBits >> 16;
4781 if ((SplatBits & ~0xff000000) == 0) {
4784 Imm = SplatBits >> 24;
4791 if ((SplatBits & ~0xffff) == 0 &&
4792 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
4795 Imm = SplatBits >> 8;
4799 if ((SplatBits & ~0xffffff) == 0 &&
4800 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
4803 Imm = SplatBits >> 16;
4818 uint64_t BitMask = 0xff;
4822 for (
int ByteNum = 0; ByteNum < 8; ++ByteNum) {
4823 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
4826 }
else if ((SplatBits & BitMask) != 0) {
4835 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
4896 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
4950 unsigned ExpectedElt = Imm;
4951 for (
unsigned i = 1; i < NumElts; ++i) {
4955 if (ExpectedElt == NumElts)
4958 if (M[i] < 0)
continue;
4959 if (ExpectedElt != static_cast<unsigned>(M[i]))
4968 bool &ReverseVEXT,
unsigned &Imm) {
4970 ReverseVEXT =
false;
4981 unsigned ExpectedElt = Imm;
4982 for (
unsigned i = 1; i < NumElts; ++i) {
4986 if (ExpectedElt == NumElts * 2) {
4991 if (M[i] < 0)
continue;
4992 if (ExpectedElt != static_cast<unsigned>(M[i]))
5007 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
5008 "Only possible block sizes for VREV are: 16, 32, 64");
5015 unsigned BlockElts = M[0] + 1;
5018 BlockElts = BlockSize / EltSz;
5020 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
5023 for (
unsigned i = 0; i < NumElts; ++i) {
5024 if (M[i] < 0)
continue;
5025 if ((
unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
5045 WhichResult = (M[0] == 0 ? 0 : 1);
5046 for (
unsigned i = 0; i < NumElts; i += 2) {
5047 if ((M[i] >= 0 && (
unsigned) M[i] != i + WhichResult) ||
5048 (M[i+1] >= 0 && (
unsigned) M[i+1] != i + NumElts + WhichResult))
5063 WhichResult = (M[0] == 0 ? 0 : 1);
5064 for (
unsigned i = 0; i < NumElts; i += 2) {
5065 if ((M[i] >= 0 && (
unsigned) M[i] != i + WhichResult) ||
5066 (M[i+1] >= 0 && (
unsigned) M[i+1] != i + WhichResult))
5078 WhichResult = (M[0] == 0 ? 0 : 1);
5079 for (
unsigned i = 0; i != NumElts; ++i) {
5080 if (M[i] < 0)
continue;
5081 if ((
unsigned) M[i] != 2 * i + WhichResult)
5101 WhichResult = (M[0] == 0 ? 0 : 1);
5102 for (
unsigned j = 0; j != 2; ++j) {
5103 unsigned Idx = WhichResult;
5104 for (
unsigned i = 0; i != Half; ++i) {
5105 int MIdx = M[i + j * Half];
5106 if (MIdx >= 0 && (
unsigned) MIdx != Idx)
5125 WhichResult = (M[0] == 0 ? 0 : 1);
5126 unsigned Idx = WhichResult * NumElts / 2;
5127 for (
unsigned i = 0; i != NumElts; i += 2) {
5128 if ((M[i] >= 0 && (
unsigned) M[i] != Idx) ||
5129 (M[i+1] >= 0 && (
unsigned) M[i+1] != Idx + NumElts))
5150 WhichResult = (M[0] == 0 ? 0 : 1);
5151 unsigned Idx = WhichResult * NumElts / 2;
5152 for (
unsigned i = 0; i != NumElts; i += 2) {
5153 if ((M[i] >= 0 && (
unsigned) M[i] != Idx) ||
5154 (M[i+1] >= 0 && (
unsigned) M[i+1] != Idx))
5169 unsigned &WhichResult,
5172 if (
isVTRNMask(ShuffleMask, VT, WhichResult))
5174 if (
isVUZPMask(ShuffleMask, VT, WhichResult))
5176 if (
isVZIPMask(ShuffleMask, VT, WhichResult))
5194 if (NumElts != M.
size())
5198 for (
unsigned i = 0; i != NumElts; ++i)
5199 if (M[i] >= 0 && M[i] != (
int) (NumElts - 1 - i))
5211 if (!isa<ConstantSDNode>(N))
5213 Val = cast<ConstantSDNode>(
N)->getZExtValue();
5216 if (Val <= 255 || ~Val <= 255)
5233 APInt SplatBits, SplatUndef;
5234 unsigned SplatBitSize;
5236 if (BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
5237 if (SplatBitSize <= 64) {
5250 uint64_t NegatedImm = (~SplatBits).getZExtValue();
5264 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
5279 bool isOnlyLowElement =
true;
5280 bool usesOnlyOneValue =
true;
5281 bool hasDominantValue =
false;
5282 bool isConstant =
true;
5288 for (
unsigned i = 0; i < NumElts; ++i) {
5293 isOnlyLowElement =
false;
5294 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
5297 ValueCounts.
insert(std::make_pair(V, 0));
5298 unsigned &Count = ValueCounts[V];
5301 if (++Count > (NumElts / 2)) {
5302 hasDominantValue =
true;
5306 if (ValueCounts.
size() != 1)
5307 usesOnlyOneValue =
false;
5309 Value = ValueCounts.
begin()->first;
5311 if (ValueCounts.
size() == 0)
5312 return DAG.getUNDEF(VT);
5323 if (hasDominantValue && EltSize <= 32) {
5341 assert(constIndex &&
"The index is not a constant!");
5346 Value, DAG.getConstant(index, dl, MVT::i32)),
5347 DAG.getConstant(index, dl, MVT::i32));
5354 if (!usesOnlyOneValue) {
5357 for (
unsigned I = 0;
I < NumElts; ++
I) {
5363 Ops.
push_back(DAG.getConstant(
I, dl, MVT::i32));
5371 for (
unsigned i = 0; i < NumElts; ++i)
5376 Val = LowerBUILD_VECTOR(Val, DAG, ST);
5380 if (usesOnlyOneValue) {
5382 if (isConstant && Val.
getNode())
5395 SDValue shuffle = ReconstructShuffle(Op, DAG);
5403 if (EltSize >= 32) {
5409 for (
unsigned i = 0; i < NumElts; ++i)
5421 if (!isConstant && !usesOnlyOneValue) {
5422 SDValue Vec = DAG.getUNDEF(VT);
5423 for (
unsigned i = 0 ; i < NumElts; ++i) {
5427 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
5448 for (
unsigned i = 0; i < NumElts; ++i) {
5472 unsigned EltNo = cast<ConstantSDNode>(V.
getOperand(1))->getZExtValue();
5473 bool FoundSource =
false;
5474 for (
unsigned j = 0; j < SourceVecs.
size(); ++j) {
5475 if (SourceVecs[j] == SourceVec) {
5476 if (MinElts[j] > EltNo)
5478 if (MaxElts[j] < EltNo)
5495 if (SourceVecs.
size() > 2)
5499 int VEXTOffsets[2] = {0, 0};
5503 for (
unsigned i = 0; i < SourceVecs.
size(); ++i) {
5506 ShuffleSrcs[i] = SourceVecs[i];
5517 assert(SourceVecs[i].
getValueType().getVectorNumElements() == 2*NumElts &&
5518 "unexpected vector sizes in ReconstructShuffle");
5520 if (MaxElts[i] - MinElts[i] >= NumElts) {
5525 if (MinElts[i] >= NumElts) {
5527 VEXTOffsets[i] = NumElts;
5531 }
else if (MaxElts[i] < NumElts) {
5539 VEXTOffsets[i] = MinElts[i];
5554 for (
unsigned i = 0; i < NumElts; ++i) {
5562 int ExtractElt = cast<ConstantSDNode>(Op.
getOperand(i)
5564 if (ExtractVec == SourceVecs[0]) {
5565 Mask.
push_back(ExtractElt - VEXTOffsets[0]);
5567 Mask.
push_back(ExtractElt + NumElts - VEXTOffsets[1]);
5588 unsigned PFIndexes[4];
5589 for (
unsigned i = 0; i != 4; ++i) {
5593 PFIndexes[i] = M[i];
5597 unsigned PFTableIndex =
5598 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
5600 unsigned Cost = (PFEntry >> 30);
5606 bool ReverseVEXT, isV_UNDEF;
5607 unsigned Imm, WhichResult;
5610 return (EltSize >= 32 ||
5626 unsigned OpNum = (PFEntry >> 26) & 0x0F;
5627 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
5628 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
5648 if (OpNum == OP_COPY) {
5649 if (LHSID == (1*9+2)*9+3)
return LHS;
5650 assert(LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
5677 OpLHS, DAG.
getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
5683 DAG.
getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
5687 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
5691 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
5695 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
5709 I = ShuffleMask.
begin(), E = ShuffleMask.
end();
I != E; ++
I)
5727 "Expect an v8i16/v16i8 type");
5732 unsigned ExtractNum = (VT ==
MVT::v16i8) ? 8 : 4;
5753 if (EltSize <= 32) {
5757 if (Lane == -1) Lane = 0;
5768 bool IsScalarToVector =
true;
5771 IsScalarToVector =
false;
5774 if (IsScalarToVector)
5783 if (
isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
5808 unsigned WhichResult;
5811 ShuffleMask, VT, WhichResult, isV_UNDEF)) {
5815 .getValue(WhichResult);
5840 assert(
std::all_of(ShuffleMask.begin(), ShuffleMask.end(), [&](
int i) {
5842 }) &&
"Unexpected shuffle index into UNDEF operand!");
5845 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
5848 assert((WhichResult == 0) &&
5849 "In-place shuffle of concat can only have one result!");
5862 unsigned PFIndexes[4];
5863 for (
unsigned i = 0; i != 4; ++i) {
5864 if (ShuffleMask[i] < 0)
5867 PFIndexes[i] = ShuffleMask[i];
5871 unsigned PFTableIndex =
5872 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
5874 unsigned Cost = (PFEntry >> 30);
5881 if (EltSize >= 32) {
5889 for (
unsigned i = 0; i < NumElts; ++i) {
5890 if (ShuffleMask[i] < 0)
5894 ShuffleMask[i] < (
int)NumElts ? V1 : V2,
5917 if (!isa<ConstantSDNode>(Lane))
5926 if (!isa<ConstantSDNode>(Lane))
5943 "unexpected CONCAT_VECTORS");
5972 unsigned HiElt = 1 - LoElt;
5977 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
5980 if (Hi0->getSExtValue() == Lo0->
getSExtValue() >> 32 &&
5981 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
5984 if (Hi0->isNullValue() && Hi1->isNullValue())
5997 unsigned HalfSize = EltSize / 2;
5999 if (!
isIntN(HalfSize, C->getSExtValue()))
6002 if (!
isUIntN(HalfSize, C->getZExtValue()))
6037 assert(OrigVT.
isSimple() &&
"Expecting a simple value type");
6040 switch (OrigSimpleTy) {
6056 unsigned ExtOpcode) {
6123 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
6124 unsigned NumElts = VT.getVectorNumElements();
6128 for (
unsigned i = 0; i != NumElts; ++i) {
6166 "unexpected type for custom-lowering ISD::MUL");
6169 unsigned NewOpc = 0;
6173 if (isN0SExt && isN1SExt)
6178 if (isN0ZExt && isN1ZExt)
6180 else if (isN1SExt || isN1ZExt) {
6214 "unexpected types for extended operands to VMULL");
6215 return DAG.
getNode(NewOpc, DL, VT, Op0, Op1);
6229 return DAG.
getNode(N0->getOpcode(), DL, VT,
6248 DAG.
getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
6281 DAG.
getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
6284 DAG.
getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
6307 "unexpected type for custom-lowering ISD::SDIV");
6342 "unexpected type for custom-lowering ISD::UDIV");
6388 DAG.
getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
6391 DAG.
getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
6395 DAG.
getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
6420 bool ExtraOp =
false;
6454 const uint64_t ByteSize =
DL.getTypeAllocSize(RetTy);
6455 const unsigned StackAlign =
DL.getPrefTypeAlignment(RetTy);
6456 int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign,
false);
6464 Entry.isSExt =
false;
6465 Entry.isZExt =
false;
6466 Entry.isSRet =
true;
6467 Args.push_back(Entry);
6471 Entry.isSExt =
false;
6472 Entry.isZExt =
false;
6473 Args.push_back(Entry);
6475 const char *LibcallName = (ArgVT ==
MVT::f64)
6476 ?
"__sincos_stret" :
"__sincosf_stret";
6483 .setDiscardResult();
6485 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
6487 SDValue LoadSin = DAG.
getLoad(ArgVT, dl, CallResult.second, SRet,
6503 if (cast<AtomicSDNode>(Op)->getOrdering() <=
Monotonic)
6522 DAG.
getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
6558 return LowerGlobalAddressWindows(Op, DAG);
6560 return LowerGlobalAddressELF(Op, DAG);
6562 return LowerGlobalAddressDarwin(Op, DAG);
6613 return LowerXALUO(Op, DAG);
6621 return LowerDYNAMIC_STACKALLOC(Op, DAG);
6661 void ARMTargetLowering::
6673 bool isThumb2 = Subtarget->
isThumb2();
6676 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
6682 : &ARM::GPRRegClass;
6702 .addConstantPoolIndex(CPI)
6711 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg3)
6719 }
else if (isThumb) {
6729 .addConstantPoolIndex(CPI)
6732 BuildMI(*MBB, MI, dl, TII->
get(ARM::tPICADD), NewVReg2)
6746 BuildMI(*MBB, MI, dl, TII->
get(ARM::tADDframe), NewVReg5)
6761 .addConstantPoolIndex(CPI)
6776 void ARMTargetLowering::EmitSjLjDispatchBlock(
MachineInstr *MI,
6786 : &ARM::GPRnopcRegClass;
6791 unsigned MaxCSNum = 0;
6795 if (!BB->isLandingPad())
continue;
6800 II = BB->begin(),
IE = BB->end(); II !=
IE; ++II) {
6801 if (!II->isEHLabel())
continue;
6803 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
6808 CSI = CallSiteIdxs.
begin(),
CSE = CallSiteIdxs.
end();
6809 CSI !=
CSE; ++CSI) {
6810 CallSiteNumToLPad[*CSI].push_back(BB);
6811 MaxCSNum = std::max(MaxCSNum, *CSI);
6818 std::vector<MachineBasicBlock*> LPadList;
6820 LPadList.reserve(CallSiteNumToLPad.
size());
6821 for (
unsigned I = 1;
I <= MaxCSNum; ++
I) {
6824 II = MBBList.
begin(),
IE = MBBList.
end(); II !=
IE; ++II) {
6825 LPadList.push_back(*II);
6826 InvokeBBs.
insert((*II)->pred_begin(), (*II)->pred_end());
6830 assert(!LPadList.empty() &&
6831 "No landing pad destinations for the dispatch jump table!");
6846 unsigned trap_opcode;
6848 trap_opcode = ARM::tTRAP;
6865 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
6873 MIB =
BuildMI(DispatchBB, dl, TII->
get(ARM::Int_eh_sjlj_dispatchsetup));
6882 unsigned NumLPads = LPadList.size();
6890 if (NumLPads < 256) {
6893 .
addImm(LPadList.size()));
6897 .addImm(NumLPads & 0xFFFF));
6899 unsigned VReg2 = VReg1;
6900 if ((NumLPads & 0xFFFF0000) != 0) {
6904 .
addImm(NumLPads >> 16));
6912 BuildMI(DispatchBB, dl, TII->
get(ARM::t2Bcc))
6919 .addJumpTableIndex(MJTI));
6924 BuildMI(DispContBB, dl, TII->
get(ARM::t2ADDrs), NewVReg4)
6929 BuildMI(DispContBB, dl, TII->
get(ARM::t2BR_JT))
6933 }
else if (Subtarget->
isThumb()) {
6940 if (NumLPads < 256) {
6977 .addJumpTableIndex(MJTI));
6995 unsigned NewVReg6 = NewVReg5;
7004 BuildMI(DispContBB, dl, TII->
get(ARM::tBR_JTr))
7014 if (NumLPads < 256) {
7021 .addImm(NumLPads & 0xFFFF));
7023 unsigned VReg2 = VReg1;
7024 if ((NumLPads & 0xFFFF0000) != 0) {
7028 .
addImm(NumLPads >> 16));
7067 .addJumpTableIndex(MJTI));
7074 BuildMI(DispContBB, dl, TII->
get(ARM::LDRrs), NewVReg5)
7081 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTadd))
7086 BuildMI(DispContBB, dl, TII->
get(ARM::BR_JTr))
7094 for (std::vector<MachineBasicBlock*>::iterator
7095 I = LPadList.begin(), E = LPadList.end();
I != E; ++
I) {
7097 if (SeenMBBs.
insert(CurMBB).second)
7110 while (!Successors.empty()) {
7118 BB->addSuccessor(DispatchBB);
7125 II = BB->rbegin(),
IE = BB->rend(); II !=
IE; ++II) {
7126 if (!II->isCall())
continue;
7130 OI = II->operands_begin(), OE = II->operands_end();
7132 if (!OI->isReg())
continue;
7133 DefRegs[OI->getReg()] =
true;
7138 for (
unsigned i = 0; SavedRegs[i] != 0; ++i) {
7139 unsigned Reg = SavedRegs[i];
7141 !ARM::tGPRRegClass.contains(Reg) &&
7142 !ARM::hGPRRegClass.contains(Reg))
7144 if (Subtarget->
isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
7146 if (!Subtarget->
isThumb() && !ARM::GPRRegClass.contains(Reg))
7159 I = MBBLPads.
begin(), E = MBBLPads.
end();
I != E; ++
I)
7160 (*I)->setIsLandingPad(
false);
7177 static unsigned getLdOpcode(
unsigned LdSize,
bool IsThumb1,
bool IsThumb2) {
7179 return LdSize == 16 ? ARM::VLD1q32wb_fixed
7180 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
7182 return LdSize == 4 ? ARM::tLDRi
7183 : LdSize == 2 ? ARM::tLDRHi
7184 : LdSize == 1 ? ARM::tLDRBi : 0;
7186 return LdSize == 4 ? ARM::t2LDR_POST
7187 : LdSize == 2 ? ARM::t2LDRH_POST
7188 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
7189 return LdSize == 4 ? ARM::LDR_POST_IMM
7190 : LdSize == 2 ? ARM::LDRH_POST
7191 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
7196 static unsigned getStOpcode(
unsigned StSize,
bool IsThumb1,
bool IsThumb2) {
7198 return StSize == 16 ? ARM::VST1q32wb_fixed
7199 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
7201 return StSize == 4 ? ARM::tSTRi
7202 : StSize == 2 ? ARM::tSTRHi
7203 : StSize == 1 ? ARM::tSTRBi : 0;
7205 return StSize == 4 ? ARM::t2STR_POST
7206 : StSize == 2 ? ARM::t2STRH_POST
7207 : StSize == 1 ? ARM::t2STRB_POST : 0;
7208 return StSize == 4 ? ARM::STR_POST_IMM
7209 : StSize == 2 ? ARM::STRH_POST
7210 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
7217 unsigned LdSize,
unsigned Data,
unsigned AddrIn,
7218 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
7219 unsigned LdOpc =
getLdOpcode(LdSize, IsThumb1, IsThumb2);
7220 assert(LdOpc != 0 &&
"Should have a load opcode");
7225 }
else if (IsThumb1) {
7228 .addReg(AddrIn).
addImm(0));
7230 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut);
7232 MIB.addReg(AddrIn).addImm(LdSize);
7234 }
else if (IsThumb2) {
7249 unsigned StSize,
unsigned Data,
unsigned AddrIn,
7250 unsigned AddrOut,
bool IsThumb1,
bool IsThumb2) {
7251 unsigned StOpc =
getStOpcode(StSize, IsThumb1, IsThumb2);
7252 assert(StOpc != 0 &&
"Should have a store opcode");
7256 }
else if (IsThumb1) {
7261 BuildMI(*BB, Pos, dl, TII->
get(ARM::tADDi8), AddrOut);
7263 MIB.addReg(AddrIn).addImm(StSize);
7265 }
else if (IsThumb2) {
7294 unsigned UnitSize = 0;
7299 bool IsThumb2 = Subtarget->
isThumb2();
7303 }
else if (Align & 2) {
7309 if ((Align % 16 == 0) && SizeVal >= 16)
7311 else if ((Align % 8 == 0) && SizeVal >= 8)
7320 bool IsNeon = UnitSize >= 8;
7321 TRC = (IsThumb1 || IsThumb2) ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
7323 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
7324 : UnitSize == 8 ? &ARM::DPRRegClass
7327 unsigned BytesLeft = SizeVal % UnitSize;
7328 unsigned LoopSize = SizeVal - BytesLeft;
7330 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
7334 unsigned srcIn = src;
7335 unsigned destIn = dest;
7336 for (
unsigned i = 0; i < LoopSize; i+=UnitSize) {
7340 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
7341 IsThumb1, IsThumb2);
7342 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
7343 IsThumb1, IsThumb2);
7351 for (
unsigned i = 0; i < BytesLeft; i++) {
7355 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
7356 IsThumb1, IsThumb2);
7357 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
7358 IsThumb1, IsThumb2);
7398 if (Subtarget->
useMovt(*MF)) {
7399 unsigned Vtmp = varEnd;
7400 if ((LoopSize & 0xFFFF0000) != 0)
7403 TII->
get(IsThumb2 ? ARM::t2MOVi16 : ARM::MOVi16),
7404 Vtmp).addImm(LoopSize & 0xFFFF));
7406 if ((LoopSize & 0xFFFF0000) != 0)
7408 TII->
get(IsThumb2 ? ARM::t2MOVTi16 : ARM::MOVTi16),
7411 .
addImm(LoopSize >> 16));
7446 .addReg(varLoop).
addMBB(loopMBB)
7449 .addReg(srcLoop).
addMBB(loopMBB)
7452 .addReg(destLoop).
addMBB(loopMBB)
7459 IsThumb1, IsThumb2);
7461 IsThumb1, IsThumb2);
7466 BuildMI(*BB, BB->
end(), dl, TII->
get(ARM::tSUBi8), varLoop);
7468 MIB.addReg(varPhi).addImm(UnitSize);
7473 TII->
get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
7479 TII->
get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
7492 unsigned srcIn = srcLoop;
7493 unsigned destIn = destLoop;
7494 for (
unsigned i = 0; i < BytesLeft; i++) {
7498 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
7499 IsThumb1, IsThumb2);
7500 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
7501 IsThumb1, IsThumb2);
7511 ARMTargetLowering::EmitLowered__chkstk(
MachineInstr *MI,
7518 "__chkstk is only supported on Windows");
7519 assert(Subtarget->
isThumb2() &&
"Windows on ARM requires Thumb-2 mode");
7545 .addImm((
unsigned)ARMCC::
AL).addReg(0)
7546 .addExternalSymbol("__chkstk")
7551 case CodeModel::
Large:
7557 .addExternalSymbol(
"__chkstk");
7559 .addImm((
unsigned)ARMCC::
AL).addReg(0)
7560 .addReg(Reg, RegState::
Kill)
7570 .addReg(
ARM::
SP).addReg(
ARM::R4)));
7572 MI->eraseFromParent();
7581 bool isThumb2 = Subtarget->
isThumb2();
7582 switch (MI->getOpcode()) {
7590 case ARM::t2STR_preidx:
7591 MI->setDesc(TII->
get(ARM::t2STR_PRE));
7593 case ARM::t2STRB_preidx:
7594 MI->setDesc(TII->
get(ARM::t2STRB_PRE));
7596 case ARM::t2STRH_preidx:
7597 MI->setDesc(TII->
get(ARM::t2STRH_PRE));
7600 case ARM::STRi_preidx:
7601 case ARM::STRBi_preidx: {
7602 unsigned NewOpc = MI->getOpcode() == ARM::STRi_preidx ?
7603 ARM::STR_PRE_IMM : ARM::STRB_PRE_IMM;
7605 unsigned Offset = MI->
getOperand(4).getImm();
7613 .addOperand(MI->getOperand(0))
7614 .addOperand(MI->getOperand(1))
7615 .addOperand(MI->getOperand(2))
7618 .addOperand(MI->getOperand(6))
7619 .addMemOperand(MMO);
7623 case ARM::STRr_preidx:
7624 case ARM::STRBr_preidx:
7625 case ARM::STRH_preidx: {
7627 switch (MI->getOpcode()) {
7629 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG;
break;
7630 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG;
break;
7631 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE;
break;
7634 for (
unsigned i = 0; i < MI->getNumOperands(); ++i)
7640 case ARM::tMOVCCr_pseudo: {
7645 const BasicBlock *LLVM_BB = BB->getBasicBlock();
7667 BB->addSuccessor(copy0MBB);
7668 BB->addSuccessor(sinkMBB);
7670 BuildMI(BB, dl, TII->
get(ARM::tBcc)).addMBB(sinkMBB)
7671 .
addImm(MI->getOperand(3).getImm()).addReg(MI->getOperand(4).getReg());
7687 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
7688 .
addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
7695 case ARM::BCCZi64: {
7701 bool RHSisZero = MI->getOpcode() == ARM::BCCZi64;
7707 TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
7708 .addReg(LHS1).
addImm(0));
7709 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
7716 TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
7717 .addReg(LHS1).
addReg(RHS1));
7718 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
7719 .addReg(LHS2).
addReg(RHS2)
7725 if (MI->getOperand(0).getImm() ==
ARMCC::NE)
7728 BuildMI(BB, dl, TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
7733 BuildMI(BB, dl, TII->
get(ARM::B)) .addMBB(exitMBB);
7735 MI->eraseFromParent();
7739 case ARM::Int_eh_sjlj_setjmp:
7740 case ARM::Int_eh_sjlj_setjmp_nofp:
7741 case ARM::tInt_eh_sjlj_setjmp:
7742 case ARM::t2Int_eh_sjlj_setjmp:
7743 case ARM::t2Int_eh_sjlj_setjmp_nofp:
7744 EmitSjLjDispatchBlock(MI, BB);
7761 const BasicBlock *LLVM_BB = BB->getBasicBlock();
7770 unsigned int ABSSrcReg = MI->getOperand(1).getReg();
7771 unsigned int ABSDstReg = MI->getOperand(0).getReg();
7772 bool ABSSrcKIll = MI->getOperand(1).isKill();
7773 bool isThumb2 = Subtarget->
isThumb2();
7777 unsigned NewRsbDstReg =
7785 BB->addSuccessor(RSBBB);
7786 BB->addSuccessor(SinkBB);
7793 TII->
get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
7794 .addReg(ABSSrcReg).
addImm(0));
7798 TII->
get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
7805 TII->
get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
7813 .addReg(NewRsbDstReg).
addMBB(RSBBB)
7822 case ARM::COPY_STRUCT_BYVAL_I32:
7824 return EmitStructByval(MI, BB);
7826 return EmitLowered__chkstk(MI, BB);
7844 MCID = &TII->get(NewOpc);
7847 "converted opcode should be the same except for cc_out");
7859 assert(!NewOpc &&
"Optional cc_out operand required");
7865 bool deadCPSR =
false;
7878 assert(!NewOpc &&
"Optional cc_out operand required");
7881 assert(deadCPSR == !Node->
hasAnyUseOfValue(1) &&
"inconsistent dead flag");
7884 "expect uninitialized optional cc_out operand");
7924 default:
return false;
7995 bool AllOnes =
false) {
8002 NonConstantVal, DAG))
8008 OtherOp, NonConstantVal);
8014 CCOp, TrueVal, FalseVal);
8065 unsigned nextIndex = 0;
8088 || C1->getZExtValue() != nextIndex+1)
8126 return DAG.
getNode(ExtOp, dl, VT, tmp);
8164 if (AddcOp0.getNode() == AddcOp1.
getNode())
8169 "Expect ADDC with two result values. First: i32");
8193 "ADDE node has the wrong inputs");
8200 if (AddeOp0.getNode() == AddeOp1.
getNode())
8204 bool IsLeftOperandMUL =
false;
8209 IsLeftOperandMUL =
true;
8226 if (IsLeftOperandMUL)
8235 if (AddcOp0 == MULOp.
getValue(0)) {
8239 if (AddcOp1 == MULOp.
getValue(0)) {
8258 DAG.
getVTList(MVT::i32, MVT::i32), Ops);
8298 if (Result.
getNode())
return Result;
8330 if (Result.
getNode())
return Result;
8377 return DAG.
getNode(Opcode, DL, VT,
8404 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
8406 ShiftAmt = ShiftAmt & (32 - 1);
8411 MulAmt >>= ShiftAmt;
8433 uint64_t MulAmtAbs = -MulAmt;
8479 APInt SplatBits, SplatUndef;
8480 unsigned SplatBitSize;
8483 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
8484 if (SplatBitSize <= 64) {
8522 APInt SplatBits, SplatUndef;
8523 unsigned SplatBitSize;
8525 if (BVN && Subtarget->
hasNEON() &&
8526 BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
8527 if (SplatBitSize <= 64) {
8561 unsigned SplatBitSize;
8564 APInt SplatBits0, SplatBits1;
8568 if (BVN0 && BVN0->
isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
8569 HasAnyUndefs) && !HasAnyUndefs) {
8570 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
8571 HasAnyUndefs) && !HasAnyUndefs) {
8576 SplatBits0 == ~SplatBits1) {
8628 if ((Val & ~Mask) != Val)
8656 (Mask == 0xffff || Mask == 0xffff0000))
8672 (Mask2 == 0xffff || Mask2 == 0xffff0000))
8692 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
8735 unsigned InvMask = cast<ConstantSDNode>(N->
getOperand(2))->getZExtValue();
8739 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
8740 "undefined behavior");
8741 unsigned Mask = (1u << Width) - 1;
8743 if ((Mask & (~Mask2)) == 0)
8779 DAG.getConstant(4, DL, MVT::i32));
8780 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, NewLD1.
getValue(1), OffsetPtr,
8819 for (
unsigned i = 0; i < NumElts; ++i) {
8851 for (
unsigned i = 0; i < NumElts; ++i) {
8885 assert(EltVT ==
MVT::f32 &&
"Unexpected type!");
8898 unsigned NumOfBitCastedElts = 0;
8900 unsigned NumOfRelevantElts = NumElts;
8901 for (
unsigned Idx = 0; Idx < NumElts; ++Idx) {
8906 ++NumOfBitCastedElts;
8910 --NumOfRelevantElts;
8914 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
8922 if (!TLI.isTypeLegal(VecVT))
8932 for (
unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
9018 unsigned HalfElts = NumElts/2;
9020 for (
unsigned n = 0; n < NumElts; ++n) {
9023 if (MaskElt < (
int)HalfElts)
9025 else if (MaskElt >= (
int)NumElts && MaskElt < (
int)(NumElts + HalfElts))
9026 NewElt = HalfElts + MaskElt - NumElts;
9044 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
9054 UI.getUse().getResNo() != Addr.
getResNo())
9063 bool isLoadOp =
true;
9064 bool isLaneOp =
false;
9065 unsigned NewOpc = 0;
9066 unsigned NumVecs = 0;
9068 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
9080 NumVecs = 2; isLaneOp =
true;
break;
9082 NumVecs = 3; isLaneOp =
true;
break;
9084 NumVecs = 4; isLaneOp =
true;
break;
9086 NumVecs = 1; isLoadOp =
false;
break;
9088 NumVecs = 2; isLoadOp =
false;
break;
9090 NumVecs = 3; isLoadOp =
false;
break;
9092 NumVecs = 4; isLoadOp =
false;
break;
9094 NumVecs = 2; isLoadOp =
false; isLaneOp =
true;
break;
9096 NumVecs = 3; isLoadOp =
false; isLaneOp =
true;
break;
9098 NumVecs = 4; isLoadOp =
false; isLaneOp =
true;
break;
9108 NumVecs = 1; isLaneOp =
false;
break;
9110 NumVecs = 1; isLaneOp =
false; isLoadOp =
false;
break;
9118 }
else if (isIntrinsic) {
9121 assert(isStore &&
"Node has to be a load, a store, or an intrinsic!");
9132 uint64_t IncVal = CInc->getZExtValue();
9133 if (IncVal != NumBytes)
9135 }
else if (NumBytes >= 3 * 16) {
9144 EVT AlignedVecTy = VecTy;
9161 if (isa<LSBaseSDNode>(N)) {
9166 assert(NumVecs == 1 &&
"Unexpected multi-element generic load/store.");
9167 assert(!isLaneOp &&
"Unexpected generic load/store lane.");
9184 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
9186 for (n = 0; n < NumResultVecs; ++n)
9187 Tys[n] = AlignedVecTy;
9198 if (
StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
9204 for (
unsigned i = AddrOpIdx + 1; i < N->
getNumOperands() - 1; ++i)
9224 for (
unsigned i = 0; i < NumResultVecs; ++i)
9230 SDValue &LdVal = NewResults[0];
9266 unsigned NumVecs = 0;
9267 unsigned NewOpc = 0;
9268 unsigned IntNo = cast<ConstantSDNode>(VLD->
getOperand(1))->getZExtValue();
9269 if (IntNo == Intrinsic::arm_neon_vld2lane) {
9272 }
else if (IntNo == Intrinsic::arm_neon_vld3lane) {
9275 }
else if (IntNo == Intrinsic::arm_neon_vld4lane) {
9284 unsigned VLDLaneNo =
9285 cast<ConstantSDNode>(VLD->
getOperand(NumVecs+3))->getZExtValue();
9289 if (UI.getUse().getResNo() == NumVecs)
9293 VLDLaneNo != cast<ConstantSDNode>(User->
getOperand(1))->getZExtValue())
9300 for (n = 0; n < NumVecs; ++n)
9313 unsigned ResNo = UI.getUse().
getResNo();
9315 if (ResNo == NumVecs)
9323 std::vector<SDValue> VLDDupResults;
9324 for (
unsigned n = 0; n < NumVecs; ++n)
9325 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), n));
9326 VLDDupResults.push_back(
SDValue(VLDDup.getNode(), NumVecs));
9353 unsigned Imm = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
9394 assert(StVT != VT &&
"Cannot truncate to the same type");
9403 if (0 != (NumElems * FromEltSz) % ToEltSz)
return SDValue();
9405 unsigned SizeRatio = FromEltSz / ToEltSz;
9406 assert(SizeRatio * NumElems * ToEltSz == VT.
getSizeInBits());
9410 NumElems*SizeRatio);
9416 for (
unsigned i = 0; i < NumElems; ++i)
9418 ? (i + 1) * SizeRatio - 1
9433 if (TLI.
isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
9452 for (
unsigned I = 0;
I < E;
I++) {
9454 StoreType, ShuffWide,
9542 c0 = (
I == 0) ? cN : c0;
9591 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
9592 Intrinsic::arm_neon_vcvtfp2fxu;
9650 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
9651 Intrinsic::arm_neon_vcvtfxu2fp;
9666 APInt SplatBits, SplatUndef;
9667 unsigned SplatBitSize;
9669 if (! BVN || ! BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
9670 HasAnyUndefs, ElementBits) ||
9671 SplatBitSize > ElementBits)
9673 Cnt = SplatBits.getSExtValue();
9682 assert(VT.
isVector() &&
"vector shift count is not a vector type");
9686 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
9697 assert(VT.
isVector() &&
"vector shift count is not a vector type");
9703 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
9708 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
9719 case Intrinsic::arm_neon_vshifts:
9720 case Intrinsic::arm_neon_vshiftu:
9721 case Intrinsic::arm_neon_vrshifts:
9722 case Intrinsic::arm_neon_vrshiftu:
9723 case Intrinsic::arm_neon_vrshiftn:
9724 case Intrinsic::arm_neon_vqshifts:
9725 case Intrinsic::arm_neon_vqshiftu:
9726 case Intrinsic::arm_neon_vqshiftsu:
9727 case Intrinsic::arm_neon_vqshiftns:
9728 case Intrinsic::arm_neon_vqshiftnu:
9729 case Intrinsic::arm_neon_vqshiftnsu:
9730 case Intrinsic::arm_neon_vqrshiftns:
9731 case Intrinsic::arm_neon_vqrshiftnu:
9732 case Intrinsic::arm_neon_vqrshiftnsu: {
9735 unsigned VShiftOpc = 0;
9738 case Intrinsic::arm_neon_vshifts:
9739 case Intrinsic::arm_neon_vshiftu:
9745 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
9751 case Intrinsic::arm_neon_vrshifts:
9752 case Intrinsic::arm_neon_vrshiftu:
9757 case Intrinsic::arm_neon_vqshifts:
9758 case Intrinsic::arm_neon_vqshiftu:
9763 case Intrinsic::arm_neon_vqshiftsu:
9768 case Intrinsic::arm_neon_vrshiftn:
9769 case Intrinsic::arm_neon_vqshiftns:
9770 case Intrinsic::arm_neon_vqshiftnu:
9771 case Intrinsic::arm_neon_vqshiftnsu:
9772 case Intrinsic::arm_neon_vqrshiftns:
9773 case Intrinsic::arm_neon_vqrshiftnu:
9774 case Intrinsic::arm_neon_vqrshiftnsu:
9786 case Intrinsic::arm_neon_vshifts:
9787 case Intrinsic::arm_neon_vshiftu:
9790 case Intrinsic::arm_neon_vrshifts:
9792 case Intrinsic::arm_neon_vrshiftu:
9794 case Intrinsic::arm_neon_vrshiftn:
9796 case Intrinsic::arm_neon_vqshifts:
9798 case Intrinsic::arm_neon_vqshiftu:
9800 case Intrinsic::arm_neon_vqshiftsu:
9802 case Intrinsic::arm_neon_vqshiftns:
9804 case Intrinsic::arm_neon_vqshiftnu:
9806 case Intrinsic::arm_neon_vqshiftnsu:
9808 case Intrinsic::arm_neon_vqrshiftns:
9810 case Intrinsic::arm_neon_vqrshiftnu:
9812 case Intrinsic::arm_neon_vqrshiftnsu:
9821 case Intrinsic::arm_neon_vshiftins: {
9824 unsigned VShiftOpc = 0;
9840 case Intrinsic::arm_neon_vqrshifts:
9841 case Intrinsic::arm_neon_vqrshiftu:
9875 assert(ST->
hasNEON() &&
"unexpected vector shift");
9919 if (VT == MVT::i32 &&
9922 isa<ConstantSDNode>(Lane)) {
9964 unsigned Opcode = 0;
10063 if (CC ==
ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
10066 }
else if (CC ==
ARMCC::EQ && TrueVal == RHS) {
10074 APInt KnownZero, KnownOne;
10077 if (KnownZero == 0xfffffffe)
10080 else if (KnownZero == 0xffffff00)
10083 else if (KnownZero == 0xffff0000)
10131 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
10132 case Intrinsic::arm_neon_vld1:
10133 case Intrinsic::arm_neon_vld2:
10134 case Intrinsic::arm_neon_vld3:
10135 case Intrinsic::arm_neon_vld4:
10136 case Intrinsic::arm_neon_vld2lane:
10137 case Intrinsic::arm_neon_vld3lane:
10138 case Intrinsic::arm_neon_vld4lane:
10139 case Intrinsic::arm_neon_vst1:
10140 case Intrinsic::arm_neon_vst2:
10141 case Intrinsic::arm_neon_vst3:
10142 case Intrinsic::arm_neon_vst4:
10143 case Intrinsic::arm_neon_vst2lane:
10144 case Intrinsic::arm_neon_vst3lane:
10145 case Intrinsic::arm_neon_vst4lane:
10162 bool *
Fast)
const {
10173 if (AllowsUnaligned) {
10185 if (Subtarget->
hasNEON() && (AllowsUnaligned || Subtarget->
isLittle())) {
/// Return true if both memory operands of a mem op satisfy the requested
/// alignment. An alignment of 0 means "unknown" and is treated as acceptable,
/// so only a known-misaligned operand fails the check.
///
/// \param DstAlign  destination alignment in bytes (0 = unknown).
/// \param SrcAlign  source alignment in bytes (0 = unknown).
/// \param AlignCheck required alignment in bytes.
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
                       unsigned AlignCheck) {
  return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
          (DstAlign == 0 || DstAlign % AlignCheck == 0));
}
10202 unsigned DstAlign,
unsigned SrcAlign,
10203 bool IsMemset,
bool ZeroMemset,
10209 if ((!IsMemset || ZeroMemset) && Subtarget->
hasNEON() &&
10216 }
else if (Size >= 8 &&
10227 else if (Size >= 2)
10296 unsigned Scale = 1;
10298 default:
return false;
10313 if ((V & (Scale - 1)) != 0)
10316 return V == (V & ((1LL << 5) - 1));
10321 bool isNeg =
false;
10328 default:
return false;
10335 return V == (V & ((1LL << 8) - 1));
10336 return V == (V & ((1LL << 12) - 1));
10345 return V == (V & ((1LL << 8) - 1));
10369 default:
return false;
10374 return V == (V & ((1LL << 12) - 1));
10377 return V == (V & ((1LL << 8) - 1));
10385 return V == (V & ((1LL << 8) - 1));
10391 int Scale = AM.
Scale;
10396 default:
return false;
10404 Scale = Scale & ~1;
10405 return Scale == 2 || Scale == 4 || Scale == 8;
10417 if (Scale & 1)
return false;
10426 unsigned AS)
const {
10435 switch (AM.
Scale) {
10453 int Scale = AM.
Scale;
10455 default:
return false;
10459 if (Scale < 0) Scale = -Scale;
10477 if (Scale & 1)
return false;
10495 return Imm >= 0 && Imm <= 255;
10510 return AbsImm >= 0 && AbsImm <= 255;
10515 SDValue &Offset,
bool &isInc,
10524 int RHSC = (
int)RHS->getZExtValue();
10525 if (RHSC < 0 && RHSC > -256) {
10538 int RHSC = (
int)RHS->getZExtValue();
10539 if (RHSC < 0 && RHSC > -0x1000) {
10574 SDValue &Offset,
bool &isInc,
10581 int RHSC = (
int)RHS->getZExtValue();
10582 if (RHSC < 0 && RHSC > -0x100) {
10587 }
else if (RHSC > 0 && RHSC < 0x100) {
10612 Ptr =
LD->getBasePtr();
10613 VT =
LD->getMemoryVT();
10615 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
10616 Ptr = ST->getBasePtr();
10617 VT = ST->getMemoryVT();
10622 bool isLegal =
false;
10625 Offset, isInc, DAG);
10628 Offset, isInc, DAG);
10651 VT =
LD->getMemoryVT();
10652 Ptr =
LD->getBasePtr();
10654 }
else if (
StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
10655 VT = ST->getMemoryVT();
10656 Ptr = ST->getBasePtr();
10661 bool isLegal =
false;
10691 unsigned Depth)
const {
10693 KnownZero = KnownOne =
APInt(BitWidth, 0);
10708 if (KnownZero == 0 && KnownOne == 0)
return;
10710 APInt KnownZeroRHS, KnownOneRHS;
10712 KnownZero &= KnownZeroRHS;
10713 KnownOne &= KnownOneRHS;
10721 case Intrinsic::arm_ldaex:
10722 case Intrinsic::arm_ldrex: {
10723 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
10747 switch (AsmPieces.
size()) {
10748 default:
return false;
10750 AsmStr = AsmPieces[0];
10755 if (AsmPieces.
size() == 3 &&
10756 AsmPieces[0] ==
"rev" && AsmPieces[1] ==
"$0" && AsmPieces[2] ==
"$1" &&
10772 if (Constraint.
size() == 1) {
10773 switch (Constraint[0]) {
10785 }
else if (Constraint.
size() == 2) {
10786 switch (Constraint[0]) {
10805 if (!CallOperandVal)
10809 switch (*constraint) {
10829 typedef std::pair<unsigned, const TargetRegisterClass*>
RCPair;
10832 if (Constraint.
size() == 1) {
10834 switch (Constraint[0]) {
10837 return RCPair(0U, &ARM::tGPRRegClass);
10838 return RCPair(0U, &ARM::GPRRegClass);
10841 return RCPair(0U, &ARM::hGPRRegClass);
10845 return RCPair(0U, &ARM::tGPRRegClass);
10846 return RCPair(0U, &ARM::GPRRegClass);
10851 return RCPair(0U, &ARM::SPRRegClass);
10853 return RCPair(0U, &ARM::DPRRegClass);
10855 return RCPair(0U, &ARM::QPRRegClass);
10861 return RCPair(0U, &ARM::SPR_8RegClass);
10863 return RCPair(0U, &ARM::DPR_8RegClass);
10865 return RCPair(0U, &ARM::QPR_8RegClass);
10869 return RCPair(0U, &ARM::SPRRegClass);
10873 if (
StringRef(
"{cc}").equals_lower(Constraint))
10874 return std::make_pair(
unsigned(ARM::CPSR), &ARM::CCRRegClass);
10882 std::string &Constraint,
10883 std::vector<SDValue>&Ops,
10888 if (Constraint.length() != 1)
return;
10890 char ConstraintLetter = Constraint[0];
10891 switch (ConstraintLetter) {
10894 case 'I':
case 'J':
case 'K':
case 'L':
10895 case 'M':
case 'N':
case 'O':
10901 int CVal = (
int) CVal64;
10904 if (CVal != CVal64)
10907 switch (ConstraintLetter) {
10912 if (CVal >= 0 && CVal <= 65535)
10919 if (CVal >= 0 && CVal <= 255)
10921 }
else if (Subtarget->
isThumb2()) {
10940 if (CVal >= -255 && CVal <= -1)
10946 if (CVal >= -4095 && CVal <= 4095)
10959 }
else if (Subtarget->
isThumb2()) {
10982 if (CVal >= -7 && CVal < 7)
10984 }
else if (Subtarget->
isThumb2()) {
11007 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
11013 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
11021 if (CVal >= 0 && CVal <= 31)
11030 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
11040 Ops.push_back(Result);
11047 assert(Subtarget->
isTargetAEABI() &&
"Register-based DivRem lowering only");
11050 "Invalid opcode for Div/Rem lowering");
11073 Entry.
isSExt = isSigned;
11074 Entry.
isZExt = !isSigned;
11075 Args.push_back(Entry);
11089 std::pair<SDValue, SDValue> CallInfo =
LowerCallTo(CLI);
11090 return CallInfo.first;
11095 assert(Subtarget->
isTargetWindows() &&
"unsupported target platform");
11115 SDValue Ops[2] = { NewSP, Chain };
11121 "Unexpected type for custom-lowering FP_EXTEND");
11128 false,
SDLoc(Op)).first;
11134 "Unexpected type for custom-lowering FP_ROUND");
11141 false,
SDLoc(Op)).first;
11151 if (v == 0xffffffff)
11168 return ARM_AM::getFP64Imm(Imm) != -1;
11177 unsigned Intrinsic)
const {
11178 switch (Intrinsic) {
11179 case Intrinsic::arm_neon_vld1:
11180 case Intrinsic::arm_neon_vld2:
11181 case Intrinsic::arm_neon_vld3:
11182 case Intrinsic::arm_neon_vld4:
11183 case Intrinsic::arm_neon_vld2lane:
11184 case Intrinsic::arm_neon_vld3lane:
11185 case Intrinsic::arm_neon_vld4lane: {
11189 uint64_t NumElts = DL.getTypeAllocSize(I.
getType()) / 8;
11194 Info.
align = cast<ConstantInt>(AlignArg)->getZExtValue();
11200 case Intrinsic::arm_neon_vst1:
11201 case Intrinsic::arm_neon_vst2:
11202 case Intrinsic::arm_neon_vst3:
11203 case Intrinsic::arm_neon_vst4:
11204 case Intrinsic::arm_neon_vst2lane:
11205 case Intrinsic::arm_neon_vst3lane:
11206 case Intrinsic::arm_neon_vst4lane: {
11210 unsigned NumElts = 0;
11215 NumElts += DL.getTypeAllocSize(ArgTy) / 8;
11221 Info.
align = cast<ConstantInt>(AlignArg)->getZExtValue();
11227 case Intrinsic::arm_ldaex:
11228 case Intrinsic::arm_ldrex: {
11241 case Intrinsic::arm_stlex:
11242 case Intrinsic::arm_strex: {
11255 case Intrinsic::arm_stlexd:
11256 case Intrinsic::arm_strexd: {
11267 case Intrinsic::arm_ldaexd:
11268 case Intrinsic::arm_ldrexd: {
11293 if (Bits == 0 || Bits > 32)
11332 bool IsLoad)
const {
11360 bool IsLoad)
const {
11385 return (Size == 64) && !Subtarget->
isMClass();
11397 return (Size == 64) && !Subtarget->
isMClass();
11405 return (Size <= (Subtarget->
isMClass() ? 32U : 64U))
11416 unsigned &Cost)
const {
11430 if (!isa<ConstantInt>(Idx))
11433 assert(VectorTy->
isVectorTy() &&
"VectorTy is not a vector type");
11434 unsigned BitWidth = cast<VectorType>(VectorTy)->
getBitWidth();
11437 if (BitWidth == 64 || BitWidth == 128) {
11447 Type *ValTy = cast<PointerType>(Addr->
getType())->getElementType();
11455 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
11459 Value *LoHi = Builder.
CreateCall(Ldrex, Addr,
"lohi");
11472 Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
11477 cast<PointerType>(Addr->
getType())->getElementType());
11491 IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
11495 Value *Lo = Builder.
CreateTrunc(Val, Int32Ty,
"lo");
11503 Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
11528 "Invalid interleave factor");
11529 assert(!Shuffles.
empty() &&
"Empty shufflevector input");
11530 assert(Shuffles.
size() == Indices.
size() &&
11531 "Unmatched number of shufflevectors and indices");
11542 if ((VecSize != 64 && VecSize != 128) || EltIs64Bits)
11551 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
11552 Intrinsic::arm_neon_vld3,
11553 Intrinsic::arm_neon_vld4};
11565 CallInst *VldN = Builder.CreateCall(VldnFunc, Ops,
"vldN");
11569 for (
unsigned i = 0; i < Shuffles.
size(); i++) {
11571 unsigned Index = Indices[i];
11573 Value *SubVec = Builder.CreateExtractValue(VldN, Index);
11577 SubVec = Builder.CreateIntToPtr(SubVec, SV->
getType());
11589 unsigned NumElts) {
11591 for (
unsigned i = 0; i < NumElts; i++)
11614 unsigned Factor)
const {
11616 "Invalid interleave factor");
11620 "Invalid interleaved store");
11632 if ((SubVecSize != 64 && SubVecSize != 128) || EltIs64Bits)
11653 static Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
11654 Intrinsic::arm_neon_vst3,
11655 Intrinsic::arm_neon_vst4};
11657 SI->
getModule(), StoreInts[Factor - 2], SubVecTy);
11665 for (
unsigned i = 0; i < Factor; i++)
11683 uint64_t &Members) {
11684 if (
const StructType *ST = dyn_cast<StructType>(Ty)) {
11685 for (
unsigned i = 0; i < ST->getNumElements(); ++i) {
11686 uint64_t SubMembers = 0;
11689 Members += SubMembers;
11691 }
else if (
const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
11692 uint64_t SubMembers = 0;
11695 Members += SubMembers * AT->getNumElements();
11706 }
else if (
const VectorType *VT = dyn_cast<VectorType>(Ty)) {
11713 return VT->getBitWidth() == 64;
11715 return VT->getBitWidth() == 128;
11717 switch (VT->getBitWidth()) {
11730 return (Members > 0 && Members <= 4);
11738 if (getEffectiveCallingConv(CallConv, isVarArg) !=
11743 uint64_t Members = 0;
11748 return IsHA || IsIntArray;
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG)
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG)
static unsigned getBitWidth(Type *Ty, const DataLayout &DL)
Returns the bitwidth of the given scalar or pointer type (if unknown returns 0).
int getFunctionContextIndex() const
Return the index for the function context object.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
void setFrameAddressIsTaken(bool T)
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Value * getValueOperand()
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
bool use_empty() const
Return true if there are no uses of this node.
const Value * getCalledValue() const
getCalledValue - Get a pointer to the function that is invoked by this instruction.
static MVT getIntegerVT(unsigned BitWidth)
void push_back(const T &Elt)
The memory access reads data.
const MachineFunction * getParent() const
getParent - Return the MachineFunction containing this basic block.
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
A parsed version of the target data layout string in and methods for querying it. ...
static SDValue PerformSELECT_CCCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformSELECT_CCCombine - Target-specific DAG combining for ISD::SELECT_CC to match f32 max/min patte...
static bool isVZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList, unsigned FixedArgs=-1)
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
The memory access writes data.
TargetLoweringBase::AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant, which is required to be operand #1) half of the integer or float value specified as operand #0.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function. ...
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
LLVMContext * getContext() const
LLVM Argument representation.
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N)
uint64_t getZExtValue() const
Get zero extended value.
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, SDLoc DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG)
STATISTIC(NumFunctions,"Total number of functions")
size_t size() const
size - Get the string size.
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
bool isKnownNeverNaN(SDValue Op) const
Test whether the given SDValue is known to never be NaN.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
BR_CC - Conditional branch.
static SDValue PerformVMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMULCombine Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the special multi...
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, SelectionDAG &DAG)
LocInfo getLocInfo() const
static MVT getVectorVT(MVT VT, unsigned NumElements)
TOF
Target Operand Flag enum.
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MachinePointerInfo getJumpTable()
getJumpTable - Return a MachinePointerInfo record that refers to a jump table entry.
A Module instance is used to store all the information related to an LLVM module. ...
void setIsLandingPad(bool V=true)
setIsLandingPad - Indicates the block is a landing pad.
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
void getInRegsParamInfo(unsigned InRegsParamRecordIndex, unsigned &BeginReg, unsigned &EndReg) const
ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete, but still used on some target...
const TargetMachine & getTargetMachine() const
bool isAtLeastAcquire(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as acquire (i.e.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
static unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
bool isCalledByLegalizer() const
static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG)
lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the bit-count for each 32-bit eleme...
static SDValue PerformInsertEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformInsertEltCombine - Target-specific dag combine xforms for ISD::INSERT_VECTOR_ELT.
CallLoweringInfo & setDebugLoc(SDLoc dl)
static bool isVTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
SDValue getMergeValues(ArrayRef< SDValue > Ops, SDLoc dl)
Create a MERGE_VALUES node from the given operands.
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG &DAG)
SkipLoadExtensionForVMULL - return a load of the original vector size that does not do any sign/zero ...
int getSplatIndex() const
Carry-setting nodes for multiple precision addition and subtraction.
const TargetMachine & getTarget() const
ARMConstantPoolValue - ARM specific constantpool value.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each element has been zero/sign-...
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getPrefTypeAlignment(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
void setIsDef(bool Val=true)
Change a def to a use, or a use to a def.
Describe properties that are true of each instruction in the target description file.
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, uint64_t &Members)
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
Y = RRC X, rotate right via carry.
static bool isVirtualRegister(unsigned Reg)
isVirtualRegister - Return true if the specified register number is in the virtual register namespace...
static bool hasNormalLoadOperand(SDNode *N)
hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node are normal, non-volatile loads.
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
bool hasT2ExtractPack() const
CallInst - This class represents a function call, abstracting a target machine's calling convention...
unsigned InferPtrAlignment(SDValue Ptr) const
Infer alignment of a load / store address.
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
EK_Inline - Jump table entries are emitted inline at their point of use.
size_type count(PtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
const GlobalValue * getGlobal() const
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
const std::string & getAsmString() const
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
SDValue getSelectCC(SDLoc DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
static bool isLegalT1AddressImmediate(int64_t V, EVT VT)
static cl::opt< bool > ARMInterworking("arm-interworking", cl::Hidden, cl::desc("Enable / disable ARM interworking (for debugging only)"), cl::init(true))
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
unsigned getSizeInBits() const
ShuffleVectorInst - This instruction constructs a fixed permutation of two input vectors.
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
bool isDoubleTy() const
isDoubleTy - Return true if this is 'double', a 64-bit IEEE fp type.
unsigned getByValSize() const
uint64_t getLimitedValue(uint64_t Limit=~0ULL) const
If this value is smaller than the specified limit, return it, otherwise return the limit value...
unsigned getInRegsParamsCount() const
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG)
SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, extending load, or BUILD_VECTOR with extended elements, return the unextended value.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
unsigned getNumOperands() const
Return the number of values used by this operation.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const std::string & getConstraintString() const
const Function * getParent() const
Return the enclosing method, or null if none.
unsigned getNumOperands() const
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, SDLoc dl, SelectionDAG &DAG)
static SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, TargetLowering::DAGCombinerInfo &DCI)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB)
transferSuccessorsAndUpdatePHIs - Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor blocks which refer to fromMBB to refer to this.
const SDValue & getOperand(unsigned Num) const
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
setjmp/longjmp based exceptions
LoadInst - an instruction for reading from memory.
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1...
static MachinePointerInfo getConstantPool()
getConstantPool - Return a MachinePointerInfo record that refers to the constant pool.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
AtomicRMWInst - an instruction that atomically reads a memory location, combines it with another valu...
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
MO_PLT - On a symbol operand, this represents an ELF PLT reference on a call operand.
Value * CallOperandVal
If this is the result output operand or a clobber, this is null, otherwise it is the incoming operand...
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
bool isThumb1Only() const
static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
static SDValue CombineBaseUpdate(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, NEON load/store intrinsics...
unsigned getValNo() const
const SDValue & getBasePtr() const
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
ThreadModel::Model ThreadModel
ThreadModel - This flag specifies the type of threading model to assume for things like atomics...
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo *MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
static SDValue PerformSUBCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from Ty1 to Ty2 is permitted when deciding whether a call is in tail posi...
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, unsigned f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
getMachineMemOperand - Allocate a new MachineMemOperand.
static bool isThumb(const MCSubtargetInfo &STI)
unsigned createPICLabelUId()
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
unsigned getResNo() const
get the index which selects a specific result in the SDNode
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason, bool gen_crash_diag=true)
Reports a serious error, calling any installed error handler.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
bool isAllOnesValue() const
SDValue getExternalSymbol(const char *Sym, EVT VT)
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
uint64_t getTypeAllocSizeInBits(Type *Ty) const
Returns the offset in bits between successive objects of the specified type, including alignment padd...
bool isShiftedMask_32(uint32_t Value)
isShiftedMask_32 - This function returns true if the argument contains a non-empty sequence of ones w...
bool isOSWindows() const
Tests whether the OS is Windows.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
static MachinePointerInfo getFixedStack(int FI, int64_t offset=0)
getFixedStack - Return a MachinePointerInfo record that refers to the the specified FrameIndex...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist...
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG)
const Triple & getTargetTriple() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isKnownNeverZero(SDValue Op) const
Test whether the given SDValue is known to never be positive or negative Zero.
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic...
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
AtomicRMWExpansionKind
Enum that specifies what a AtomicRMWInst is expanded to, if at all.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
bool isVector() const
isVector - Return true if this is a vector value type.
static SDValue LowerInterruptReturn(SmallVectorImpl< SDValue > &RetOps, SDLoc DL, SelectionDAG &DAG)
BlockAddress - The address of a basic block.
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static SDValue PerformVCVTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) can replace combinations of ...
bool isTargetAEABI() const
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
bool isNegative() const
Return true if the value is negative.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
bool useSoftFloat() const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
static const MachineInstrBuilder & AddDefaultPred(const MachineInstrBuilder &MIB)
MachineMemOperand - A description of a memory reference used in the backend.
static SDValue PerformLOADCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
ParmContext getCallOrPrologue() const
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
const HexagonInstrInfo * TII
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) intrinsic...
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false)
Shift and rotation operations.
static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG)
static bool isVUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
bool isTargetDarwin() const
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
StructType - Class to represent struct types.
opStatus convertToInteger(integerPart *, unsigned int, bool, roundingMode, bool *) const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef...
Type * getArrayElementType() const
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
A Use represents the edge between a Value definition and its users.
static SDValue findMUL_LOHI(SDValue V)
static bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize...
MachineFunction & getMachineFunction() const
static bool isVZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of "vector_shuffle v...
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops, unsigned NumOps, bool isSigned, SDLoc dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
unsigned getFrameRegister(const MachineFunction &MF) const override
static void advance(T &it, size_t Val)
CallLoweringInfo & setChain(SDValue InChain)
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG)
PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for ISD::VECTOR_SHUFFLE.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
separate const offset from Split GEPs to a variadic base and a constant offset for better CSE
bool isReg() const
isReg - Tests if this is a MO_Register operand.
unsigned getNumArgOperands() const
getNumArgOperands - Return the number of call arguments.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
CopyToReg - This node has three operands: a chain, a register number to set to this value...
static SDValue PerformADDCCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCCombine - Target-specific dag combine transform from ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL.
static Constant * get(ArrayRef< Constant * > V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
unsigned getArgRegsSaveSize() const
FLT_ROUNDS_ - Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest 2 Round to ...
std::vector< MachineBasicBlock * >::iterator succ_iterator
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
Reg
All possible values of the reg field in the ModR/M byte.
bool getInsertFencesForAtomic() const
Return whether the DAG builder should automatically insert fences and reduce ordering for atomics...
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt) For double-word atomic operations: ValLo, ValHi, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amtLo, amtHi) ValLo, ValHi, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amtLo, amtHi) These correspond to the atomicrmw instruction.
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
ObjectFormatType getObjectFormat() const
getFormat - Get the object format for this triple.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
bool hasMPExtension() const
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
int getMaskElt(unsigned Idx) const
static bool isSingletonVEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
isZeroExtended - Check if a node is a vector value that is zero-extended or a constant BUILD_VECTOR w...
The memory access is volatile.
static ShiftOpc getShiftOpcForNode(unsigned Opcode)
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
bool isFPBrccSlow() const
Type * getVectorElementType() const
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
const MachineInstrBuilder & addImm(int64_t Val) const
addImm - Add a new immediate operand.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2)
Return the store opcode for a given store size.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
unsigned getNumOperands() const
Access to explicit operands of the instruction.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
SmallVector< ISD::InputArg, 32 > Ins
STACKSAVE - STACKSAVE has one operand, an input chain.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns true if the given (atomic) load should be expanded by the IR-level AtomicExpand pass into a l...
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
static const unsigned PerfectShuffleTable[6561+1]
static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG)
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
void RemoveOperand(unsigned i)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
unsigned getLocReg() const
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
const Triple & getTargetTriple() const
LLVMContext & getContext() const
getContext - Return the LLVMContext in which this type was uniqued.
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG)
static unsigned createNEONModImm(unsigned OpCmode, unsigned Val)
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
bool isTargetCOFF() const
SDValue getRegisterMask(const uint32_t *RegMask)
bool hasStructRetAttr() const
Determine if the function returns a structure through first pointer argument.
bool hasCallSiteLandingPad(MCSymbol *Sym)
hasCallSiteLandingPad - Return true if the landing pad Eh symbol has an associated call site...
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
bool isTargetMachO() const
void AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag...
ArrayType - Class to represent array types.
This contains information for each constraint that we are lowering.
Simple integer binary arithmetic operators.
static int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2, const int *MaskElts)
Return an ISD::VECTOR_SHUFFLE node.
void addInRegsParamInfo(unsigned RegBegin, unsigned RegEnd)
SmallVector< ISD::OutputArg, 32 > Outs
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
CallLoweringInfo & setZExtResult(bool Value=true)
bool isFloatingPointTy() const
isFloatingPointTy - Return true if this is one of the six floating point types
bool hasAnyDataBarrier() const
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
const SDValue & getBasePtr() const
MachineConstantPoolValue * getMachineCPVal() const
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
StoreInst - an instruction for storing to memory.
static mvt_range integer_vector_valuetypes()
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
const APInt & getAPIntValue() const
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool isArrayTy() const
isArrayTy - True if this is an instance of ArrayType.
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
EVT getMemoryVT() const
Return the type of the in-memory value.
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
bool isThumb1OnlyFunction() const
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG)
static const MCPhysReg GPRArgRegs[]
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Type * getElementType() const
size_t size() const
size - Get the array size.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
PointerType - Class to represent pointers.
const BasicBlock * getBasicBlock() const
getBasicBlock - Return the LLVM basic block that this instance corresponded to originally.
UNDEF - An undefined node.
This class is used to represent ISD::STORE nodes.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
static bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting an 8-bit im...
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG)
PerformVMOVDRRCombine - Target-specific dag combine xforms for ARMISD::VMOVDRR.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
TargetInstrInfo - Interface to description of machine instruction set.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
LLVM_CONSTEXPR size_t array_lengthof(T(&)[N])
Find the length of an array.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
static EVT getExtensionTo64Bits(const EVT &OrigVT)
SDNode * getNode() const
get the SDNode which holds the desired result
void setReturnRegsCount(unsigned s)
bundle_iterator< MachineInstr, instr_iterator > iterator
bool isiOS() const
Is this an iOS triple.
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
unsigned getScalarSizeInBits() const
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
A switch()-like statement whose cases are string literals.
Type * getParamType(unsigned i) const
Parameter type accessors.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
bool isOnlyUserOf(const SDNode *N) const
Return true if this node is the only use of N.
initializer< Ty > init(const Ty &Val)
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
static SDValue PerformBFICombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformBFICombine - (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff the bits being cleared by...
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
static bool isConstVecPow2(SDValue ConstVec, bool isSigned, uint64_t &C)
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
unsigned getAlignment() const
getAlignment - Return the alignment of the access that is being performed
bool useSoftFloat() const override
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
isSignExtended - Check if a node is a vector value that is sign-extended or a constant BUILD_VECTOR w...
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
bool isMachineConstantPoolEntry() const
CodeModel::Model getCodeModel() const
Returns the code model.
MVT - Machine Value Type.
ParmContext
ParmContext - This enum tracks whether calling convention lowering is in the context of prologue or c...
static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, unsigned SplatBitSize, SelectionDAG &DAG, SDLoc dl, EVT &VT, bool is128Bits, NEONModImmType type)
isNEONModifiedImm - Check if the specified splat value corresponds to a valid vector constant for a N...
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
LLVM Basic Block Representation.
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, TargetLowering::DAGCombinerInfo &DCI, bool AllOnes=false)
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool isNonTemporal() const
bool isVectorTy() const
isVectorTy - True if this is an instance of VectorType.
bool supportsTailCall() const
bool isOptionalDef() const
Set if this operand is an optional def.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
This is an important base class in LLVM.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
bool hasHiddenVisibility() const
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG)
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
const Constant * getConstVal() const
This file contains the declarations for the subclasses of Constant, which represent the different fla...
const MachineOperand & getOperand(unsigned i) const
Carry-using nodes for multiple precision addition and subtraction.
bool isFloatTy() const
isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
ConstantFP - Floating Point Values [float, double].
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
static bool isReverseMask(ArrayRef< int > M, EVT VT)
unsigned getInRegsParamsProcessed() const
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isZeroOrAllOnes(SDValue N, bool AllOnes)
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
void AddToWorklist(SDNode *N)
static mvt_range fp_valuetypes()
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC)
IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, SDLoc dl)
getZeroVector - Returns a vector of specified type with all zero elements.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
static Type * getVoidTy(LLVMContext &C)
This class provides iterator support for SDUse operands that use a specific SDNode.
static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT)
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
SDValue getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
bool isBeforeLegalize() const
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
static EVT getFloatingPointVT(unsigned BitWidth)
getFloatingPointVT - Returns the EVT that represents a floating point type with the given number of b...
SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT, bool isOpaque=false)
unsigned getBitWidth() const
Return the number of bits in the APInt.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
unsigned getOpcode() const
TRAP - Trapping instruction.
Value * getOperand(unsigned i) const
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, SDValue &RetVal1, SDValue &RetVal2)
Value * getPointerOperand()
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
static mvt_range vector_valuetypes()
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG)
getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count for each 16-bit element fr...
bool hasVMLxForwarding() const
Class to represent integer types.
CondCode getSetCCSwappedOperands(CondCode Operation)
getSetCCSwappedOperands - Return the operation corresponding to (Y op X) when given the operation for...
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
static SDValue PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
bool useNEONForSinglePrecisionFP() const
bool empty() const
empty - Check if the array is empty.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const SDValue & getValue() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Bit counting operators with an undefined result for zero inputs.
static SDValue PerformVDUPLANECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformVDUPLANECombine - Target-specific dag combine xforms for ARMISD::VDUPLANE. ...
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
BuildMI - Builder interface.
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
succ_iterator succ_begin()
void removeSuccessor(MachineBasicBlock *succ)
removeSuccessor - Remove successor from the successors list of this MachineBasicBlock.
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
EVT - Extended Value Type.
static SDValue PerformORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformORCombine - Target-specific dag combine xforms for ISD::OR.
bool isIntN(unsigned N, int64_t x)
isIntN - Checks if a signed integer fits into the given (dynamic) bit width.
bool isPointerTy() const
isPointerTy - True if this is an instance of PointerType.
std::vector< ArgListEntry > ArgListTy
const APFloat & getValueAPF() const
unsigned getNextStackOffset() const
bool hasSinCos() const
This function returns true if the target has sincos() routine in its compiler runtime or math librari...
bool isEqualTo(SDValue A, SDValue B) const
Test whether two SDValues are known to compare equal.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="")
This structure contains all information that is necessary for lowering calls.
bool isFPOrFPVectorTy() const
isFPOrFPVectorTy - Return true if this is a FP type or a vector of FP.
PointerType * getPointerTo(unsigned AddrSpace=0)
getPointerTo - Return a pointer to the current type.
PointerType * getInt8PtrTy(unsigned AddrSpace=0)
Fetch the type representing a pointer to an 8-bit integer value.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
ARM_AAPCS - ARM Architecture Procedure Calling Standard calling convention (aka EABI).
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
MachinePointerInfo - This class contains a discriminated union of information about pointers in memor...
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
std::pair< unsigned, const TargetRegisterClass * > RCPair
static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG)
lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the bit-count for each 16-bit eleme...
static Constant * getSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumElts)
Get a mask consisting of sequential integers starting from Start.
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Triple - Helper class for working with autoconf configuration names.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
const MachinePointerInfo & getPointerInfo() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, SDLoc dl, SelectionDAG &DAG)
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero...
unsigned getByValAlign() const
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters, appending the result fragments to the output list.
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
getCallSiteLandingPad - Get the call site indexes for a landing pad EH symbol.
bool bitsGT(EVT VT) const
bitsGT - Return true if this has more bits than VT.
bool genLongCalls() const
ArrayRef< int > getMask() const
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformShiftCombine - Checks for immediate versions of vector shifts and lowers them.
static AddrOpc getAM2Op(unsigned AM2Opc)
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
bool hasExternalWeakLinkage() const
TokenFactor - This node takes multiple tokens as input and produces a single token result...
bool hasDLLImportStorageClass() const
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
static const MachineInstrBuilder & AddDefaultCC(const MachineInstrBuilder &MIB)
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
SDValue getNOT(SDLoc DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
CCState - This class holds information needed while lowering arguments and return values...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements...
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
void addOperand(MachineFunction &MF, const MachineOperand &Op)
Add the specified operand to the instruction.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
bool GVIsIndirectSymbol(const GlobalValue *GV, Reloc::Model RelocM) const
GVIsIndirectSymbol - true if the GV will be accessed via an indirect symbol.
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getVectorNumElements() const
void setExceptionPointerRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception address on entry to...
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
StructType::get - This static method is the primary way to create a literal StructType.
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static unsigned getAM2Offset(unsigned AM2Opc)
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned char TargetFlags=0) const
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
MachineOperand class - Representation of each machine instruction operand.
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total value size to 64 bits...
Type * getType() const
All values are typed, get the type of this value.
const InstrItineraryData * getInstrItineraryData() const override
getInstrItins - Return the instruction itineraries based on subtarget selection.
Provides information about what library functions are available for the current target.
CCValAssign - Represent assignment of one arg/retval to a location.
static void ReplaceREADCYCLECOUNTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
const uint32_t * getNoPreservedMask() const
BRCOND - Conditional branch.
SDNode * getGluedUser() const
If this node has a glue value with a user, return the user (there is at most one).
const SDValue & getChain() const
Byte Swap and Counting operators.
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
This is an abstract virtual class for memory operations.
BasicBlock * GetInsertBlock() const
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
CallLoweringInfo & setSExtResult(bool Value=true)
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Function * getCalledFunction() const
getCalledFunction - Return the function called, or null if this is an indirect function invocation...
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively...
MachineFrameInfo * getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
static void emitPostSt(MachineBasicBlock *BB, MachineInstr *Pos, const TargetInstrInfo *TII, DebugLoc dl, unsigned StSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment store operation with given size.
Represents one node in the SelectionDAG.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
getSetCCInverse - Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operat...
const MachineInstrBuilder & addFrameIndex(int Idx) const
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, bool &swpCmpOps, bool &swpVselOps)
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
static MachinePointerInfo getStack(int64_t Offset)
getStack - stack pointer relative access.
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG)
ExpandBITCAST - If the target supports VFP, this function is called to expand a bit convert where eit...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
R Default(const T &Value) const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
unsigned Log2_32(uint32_t Value)
Log2_32 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, const ARMSubtarget *ST, SDLoc dl)
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
static unsigned isNEONTwoResultShuffleMask(ArrayRef< int > ShuffleMask, EVT VT, unsigned &WhichResult, bool &isV_UNDEF)
Check if ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), and return the corresponding AR...
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to...
VectorType - Class to represent vector types.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Class for arbitrary precision integers.
static ARMConstantPoolSymbol * Create(LLVMContext &C, const char *s, unsigned ID, unsigned char PCAdj)
void setExceptionSelectorRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception typeid on entry to ...
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
bool isIntegerTy() const
isIntegerTy - True if this is an instance of IntegerType.
virtual const TargetRegisterClass * getRegClassFor(MVT VT) const
Return the register class that should be used for the specified value type.
static use_iterator use_end()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
Instruction * emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two > 0 (64 bit edition...
APInt bitcastToAPInt() const
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
ANY_EXTEND - Used for integer types. The high bits are undefined.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
static int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
void setArgumentStackSize(unsigned size)
bool isOSVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
isOSVersionLT - Helper function for doing comparisons against version numbers included in the target ...
static SDValue PerformXORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static bool isVTBLMask(ArrayRef< int > M, EVT VT)
static void emitPostLd(MachineBasicBlock *BB, MachineInstr *Pos, const TargetInstrInfo *TII, DebugLoc dl, unsigned LdSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment load operation with given size.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
const TargetRegisterClass * getRegClassFor(MVT VT) const override
getRegClassFor - Return the register class that should be used for the specified value type...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
BR_JT - Jumptable branch.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
Representation of each machine instruction.
static MachinePointerInfo getGOT()
getGOT - Return a MachinePointerInfo record that refers to a GOT entry.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
uint64_t MinAlign(uint64_t A, uint64_t B)
MinAlign - A and B are either alignments or offsets.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static CondCodes getOppositeCondition(CondCodes CC)
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
static bool canChangeToInt(SDValue Op, bool &SeenZero, const ARMSubtarget *Subtarget)
canChangeToInt - Given the fp compare operand, return true if it is suitable to morph to an integer c...
static bool definesCPSR(const MachineInstr *MI)
void setVarArgsFrameIndex(int Index)
SmallVector< SDValue, 32 > OutVals
unsigned getSchedClass() const
Return the scheduling class for this instruction.
bool isLandingPad() const
isLandingPad - Returns true if the block is a landing pad.
static SDValue PerformVMOVRRDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMOVRRDCombine - Target-specific dag combine xforms for ARMISD::VMOVRRD.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, SDLoc dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
static bool LowerToByteSwap(CallInst *CI)
LowerToByteSwap - Replace a call instruction into a call to bswap intrinsic.
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Bitwise operators - logical and, logical or, logical xor.
bool hasAnyUseOfValue(unsigned Value) const
Return true if there are any use of the indicated value.
pointer data()
Return a pointer to the vector's buffer, even if empty().
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
static IntegerType * getInt32Ty(LLVMContext &C)
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
static bool isVUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of "vector_shuffle v...
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
unsigned getAlignment() const
getAlignment - Return the alignment of the access that is being performed
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable...
virtual unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
If the specified machine instruction is a direct load from a stack slot, return the virtual or physic...
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
static bool isLegalT2AddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
static bool isLegalAddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
isLegalAddressImmediate - Return true if the integer value can be used as the offset of the target ad...
void setReg(unsigned Reg)
Change the register this operand corresponds to.
static SDValue PerformVDIVCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) can replace combinations of ...
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned char TargetFlags=0) const
void setArgRegsSaveSize(unsigned s)
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
FunctionType * getFunctionType() const
Fast - This calling convention attempts to make calls as fast as possible (e.g.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
APFloat abs(APFloat X)
Returns the absolute value of the argument.
const ARMBaseRegisterInfo * getRegisterInfo() const override
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, ARMCC::CondCodes &CondCode2)
FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OpSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.stacksave/llvm.stackrestore should save and restore.
bool allowsUnalignedMem() const
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
VectorType * getType() const
getType - Overload to return most specific vector type.
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate, that is the target has add instructions which can add a register and the immediate without having to materialize the immediate into a register.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
FSINCOS - Compute both fsin and fcos as a single operation.
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
getEVT - Return the value type corresponding to the specified type.
bool isAtLeastRelease(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as release (i.e.
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.setjmp intrinsic.
static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
EVT getValueType() const
Return the ValueType of the referenced return value.
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool useMovt(const MachineFunction &MF) const
bool hasLocalLinkage() const
bool all_of(R &&Range, UnaryPredicate &&P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly...
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
SDValue getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isTarget=false, bool isOpaque=false)
bool is128BitVector() const
is128BitVector - Return true if this is a 128-bit vector type.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
SDValue getGLOBAL_OFFSET_TABLE(EVT VT)
Return a GLOBAL_OFFSET_TABLE node. This does not have a useful SDLoc.
static SDValue PerformVLDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
unsigned getReg() const
getReg - Returns the register number.
StringRef getValueAsString() const
Return the attribute's value as a string.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructions which can compare a register against the immediate without having to materialize the immediate into a register.
static bool isVTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of "vector_shuffle v...
bool hasLoadLinkedStoreConditional() const override
True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional and expand AtomicCmpXchgInst...
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC, bool &Invert, SDValue &OtherOp, SelectionDAG &DAG)
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
unsigned getAlignment() const
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
void setMinStackArgumentAlignment(unsigned Align)
Set the minimum stack alignment of an argument (in log2(bytes)).
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
bool isShuffleMaskLegal(const SmallVectorImpl< int > &M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
CallLoweringInfo & setInRegister(bool Value=true)
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
getPrimitiveSizeInBits - Return the basic size of this type if it is a primitive type.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
std::reverse_iterator< iterator > reverse_iterator
Module * getParent()
Get the module that this global value is contained inside of...
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
int getVarArgsFrameIndex() const
SDValue getRegister(unsigned Reg, EVT VT)
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
Instruction * makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const
void setInsertFencesForAtomic(bool fence)
Set if the DAG builder should automatically insert fences and reduce the order of atomic memory opera...
static VectorType * get(Type *ElementType, unsigned NumElements)
VectorType::get - This static method is the primary way to construct a VectorType.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2)
Return the load opcode for a given load size.
bool isTruncatingStore() const
Return true if the op does a truncation before store.
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
SDValue getValueType(EVT)
Disable implicit floating point insts.
const MCOperandInfo * OpInfo
bool isWindowsItaniumEnvironment() const
PREFETCH - This corresponds to a prefetch intrinsic.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which...
bool isUInt< 16 >(uint64_t x)
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target...
BasicBlockListType::iterator iterator
const TargetLowering & getTargetLoweringInfo() const
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
void rewindByValRegsInfo()
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
Primary interface to the complete machine description for the target machine.
bool hasDataBarrier() const
static SDValue PerformSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformSTORECombine - Target-specific dag combine xforms for ISD::STORE.
C - The default llvm calling convention, compatible with C.
bool isPowerOf2_32(uint32_t Value)
isPowerOf2_32 - This function returns true if the argument is a power of two > 0. ...
bool hasDivideInARMMode() const
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG)
StringRef - Represent a constant reference to a string, i.e.
MachineModuleInfo & getMMI() const
SetCC operator - This evaluates to a true value iff the condition is true.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
unsigned MaxStoresPerMemsetOptSize
Maximum number of store operations that may be substituted for the call to memset, used for functions with OptSize attribute.
static MachineBasicBlock * OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml","ocaml 3.10-compatible collector")
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getLocMemOffset() const
MVT getVectorElementType() const
static bool isVolatile(Instruction *Inst)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG)
TRUNCATE - Completely drop the high bits.
bool isUIntN(unsigned N, uint64_t x)
isUIntN - Checks if an unsigned integer fits into the given (dynamic) bit width.
unsigned getAlignment() const
bool isBitFieldInvertedMask(unsigned v)
unsigned AllocateReg(unsigned Reg)
AllocateReg - Attempt to allocate one register.
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
addReg - Add a new virtual register operand...
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
bool is64BitVector() const
is64BitVector - Return true if this is a 64-bit vector type.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
bool isEmpty() const
Returns true if there are no itineraries.
Value * getPointerOperand()
void addSuccessor(MachineBasicBlock *succ, uint32_t weight=0)
addSuccessor - Add succ as a successor of this MachineBasicBlock.
bool hasThumb2DSP() const
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
bool isTargetWindows() const
unsigned Log2_64(uint64_t Value)
Log2_64 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
const BasicBlock * getParent() const
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
static bool isSplatMask(const int *Mask, EVT VT)
unsigned getConstantPoolIndex(const Constant *C, unsigned Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one...
static bool isVEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseVEXT, unsigned &Imm)
EVT changeVectorElementTypeToInteger() const
changeVectorElementTypeToInteger - Return a vector with the same number of elements as this vector...
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG)
Instruction * emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const override
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
static const MachineInstrBuilder & AddDefaultT1CC(const MachineInstrBuilder &MIB, bool isDead=false)
SDValue getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget=false)
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
MachineModuleInfo - This class contains meta information specific to a module.
LLVMContext & getContext() const
Get the global data context.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, unsigned Align, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
FloatABI::ABIType FloatABIType
FloatABIType - This setting is set when the -float-abi=xxx option is specified on the command line...
uint64_t getZExtValue() const
static uint64_t decodeNEONModImm(unsigned ModImm, unsigned &EltBits)
decodeNEONModImm - Decode a NEON modified immediate value into the element value and the element size...
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, SDLoc DL) const
SoftenSetCCOperands - Soften the operands of a comparison.
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Function must be optimized for size first.