89 #define DEBUG_TYPE "aarch64-lower"
91 STATISTIC(NumTailCalls,
"Number of tail calls");
92 STATISTIC(NumShiftInserts,
"Number of vector shift inserts");
96 cl::desc(
"Allow AArch64 SLI/SRI formation"),
104 cl::desc(
"Allow AArch64 Local Dynamic TLS code generation"),
691 void AArch64TargetLowering::addTypeForNEON(
MVT VT,
MVT PromotedBitwiseVT) {
774 void AArch64TargetLowering::addDRTypeForNEON(
MVT VT) {
779 void AArch64TargetLowering::addQRTypeForNEON(
MVT VT) {
801 APInt KnownZero2, KnownOne2;
804 KnownZero &= KnownZero2;
805 KnownOne &= KnownOne2;
813 case Intrinsic::aarch64_ldaxr:
814 case Intrinsic::aarch64_ldxr: {
816 EVT VT = cast<MemIntrinsicSDNode>(
Op)->getMemoryVT();
826 unsigned IntNo = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
830 case Intrinsic::aarch64_neon_umaxv:
831 case Intrinsic::aarch64_neon_uminv: {
839 assert(BitWidth >= 8 &&
"Unexpected width!");
843 assert(BitWidth >= 16 &&
"Unexpected width!");
1090 case AArch64::F128CSEL:
1093 case TargetOpcode::STACKMAP:
1094 case TargetOpcode::PATCHPOINT:
1262 return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
1360 unsigned Opcode = 0;
1383 return DAG.
getNode(Opcode, DL,
MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
1393 unsigned Depth = 0) {
1419 if (!CanNegateL && !CanNegateR)
1423 CanNegate = CanNegateL && CanNegateR;
1430 if (NeedsNegOutL && NeedsNegOutR)
1480 Predicate = ExtraCC;
1492 "Valid conjunction/disjunction tree");
1500 bool NegateOpsAndResult = Opcode ==
ISD::OR;
1504 if (NegateOpsAndResult) {
1508 assert(isValidL &&
"Valid conjunction/disjunction tree");
1514 assert(isValidR &&
"Valid conjunction/disjunction tree");
1515 assert((CanNegateL || CanNegateR) &&
"Valid conjunction/disjunction tree");
1522 bool NeedsNegOutL = LHS->getOpcode() ==
ISD::OR;
1524 "Valid conjunction/disjunction tree");
1537 if (NegateOpsAndResult && !Negate)
1541 NegateOpsAndResult, CmpR,
1545 if (NegateOpsAndResult && !Negate)
1570 uint64_t
C = RHSC->getZExtValue();
1578 if ((VT ==
MVT::i32 && C != 0x80000000 &&
1580 (VT ==
MVT::i64 && C != 0x80000000ULL &&
1599 if ((VT ==
MVT::i32 && C != INT32_MAX &&
1601 (VT ==
MVT::i64 && C != INT64_MAX &&
1610 if ((VT ==
MVT::i32 && C != UINT32_MAX &&
1612 (VT ==
MVT::i64 && C != UINT64_MAX &&
1643 if ((RHSC->
getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
1644 cast<LoadSDNode>(LHS)->getExtensionType() ==
ISD::ZEXTLOAD &&
1645 cast<LoadSDNode>(LHS)->getMemoryVT() ==
MVT::i16 &&
1647 int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
1675 static std::pair<SDValue, SDValue>
1678 "Unsupported value type");
1752 UpperBits).getValue(1);
1774 UpperBits).getValue(1);
1784 Value = DAG.
getNode(Opc, DL, VTs, LHS, RHS);
1787 return std::make_pair(Value, Overflow);
1828 if (!CFVal || !CTVal)
1833 if (CTVal->isAllOnesValue() && CFVal->
isNullValue()) {
1865 bool ExtraOp =
false;
1924 unsigned IsWrite = cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue();
1925 unsigned Locality = cast<ConstantSDNode>(Op.
getOperand(3))->getZExtValue();
1926 unsigned IsData = cast<ConstantSDNode>(Op.
getOperand(4))->getZExtValue();
1928 bool IsStream = !Locality;
1932 assert(Locality <= 3 &&
"Prefetch locality out-of-range");
1936 Locality = 3 - Locality;
1940 unsigned PrfOp = (IsWrite << 4) |
1955 return LowerF128Call(Op, DAG, LC);
2063 In = DAG.
getNode(CastOpc, dl, CastVT, In);
2099 return LowerF128Call(Op, DAG, LC);
2116 Entry.isSExt =
false;
2117 Entry.isZExt =
false;
2118 Args.push_back(Entry);
2120 const char *LibcallName =
2121 (ArgVT ==
MVT::f64) ?
"__sincos_stret" :
"__sincosf_stret";
2130 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
2131 return CallResult.first;
2156 switch (OrigSimpleTy) {
2169 unsigned ExtOpcode) {
2193 unsigned HalfSize = EltSize / 2;
2195 if (!
isIntN(HalfSize,
C->getSExtValue()))
2198 if (!
isUIntN(HalfSize,
C->getZExtValue()))
2219 unsigned EltSize = VT.getScalarSizeInBits() / 2;
2220 unsigned NumElts = VT.getVectorNumElements();
2223 for (
unsigned i = 0;
i != NumElts; ++
i) {
2276 "unexpected type for custom-lowering ISD::MUL");
2279 unsigned NewOpc = 0;
2283 if (isN0SExt && isN1SExt)
2288 if (isN0ZExt && isN1ZExt)
2290 else if (isN1SExt || isN1ZExt) {
2324 "unexpected types for extended operands to VMULL");
2325 return DAG.
getNode(NewOpc, DL, VT, Op0, Op1);
2333 return DAG.
getNode(N0->getOpcode(), DL, VT,
2340 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(
SDValue Op,
2342 unsigned IntNo = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
2346 case Intrinsic::thread_pointer: {
2350 case Intrinsic::aarch64_neon_smax:
2353 case Intrinsic::aarch64_neon_umax:
2356 case Intrinsic::aarch64_neon_smin:
2359 case Intrinsic::aarch64_neon_umin:
2374 return LowerGlobalAddress(Op, DAG);
2376 return LowerGlobalTLSAddress(Op, DAG);
2378 return LowerSETCC(Op, DAG);
2380 return LowerBR_CC(Op, DAG);
2382 return LowerSELECT(Op, DAG);
2384 return LowerSELECT_CC(Op, DAG);
2386 return LowerJumpTable(Op, DAG);
2388 return LowerConstantPool(Op, DAG);
2390 return LowerBlockAddress(Op, DAG);
2392 return LowerVASTART(Op, DAG);
2394 return LowerVACOPY(Op, DAG);
2396 return LowerVAARG(Op, DAG);
2418 return LowerFP_ROUND(Op, DAG);
2420 return LowerFP_EXTEND(Op, DAG);
2422 return LowerFRAMEADDR(Op, DAG);
2424 return LowerRETURNADDR(Op, DAG);
2426 return LowerINSERT_VECTOR_ELT(Op, DAG);
2428 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
2430 return LowerBUILD_VECTOR(Op, DAG);
2432 return LowerVECTOR_SHUFFLE(Op, DAG);
2434 return LowerEXTRACT_SUBVECTOR(Op, DAG);
2438 return LowerVectorSRA_SRL_SHL(Op, DAG);
2440 return LowerShiftLeftParts(Op, DAG);
2443 return LowerShiftRightParts(Op, DAG);
2445 return LowerCTPOP(Op, DAG);
2447 return LowerFCOPYSIGN(Op, DAG);
2449 return LowerVectorAND(Op, DAG);
2451 return LowerVectorOR(Op, DAG);
2458 return LowerINT_TO_FP(Op, DAG);
2461 return LowerFP_TO_INT(Op, DAG);
2463 return LowerFSINCOS(Op, DAG);
2467 return LowerINTRINSIC_WO_CHAIN(Op, DAG);
2475 #include "AArch64GenCallingConv.inc"
2479 bool IsVarArg)
const {
2484 return CC_AArch64_WebKit_JS;
2486 return CC_AArch64_GHC;
2493 return CC_AArch64_AAPCS;
2494 return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
2501 : RetCC_AArch64_AAPCS;
2504 SDValue AArch64TargetLowering::LowerFormalArguments(
2522 unsigned NumArgs = Ins.
size();
2524 unsigned CurArgIdx = 0;
2525 for (
unsigned i = 0;
i != NumArgs; ++
i) {
2526 MVT ValVT = Ins[
i].VT;
2527 if (Ins[
i].isOrigArg()) {
2528 std::advance(CurOrigArg, Ins[
i].getOrigArgIndex() - CurArgIdx);
2529 CurArgIdx = Ins[
i].getOrigArgIndex();
2544 assert(!Res &&
"Call operand has unhandled type");
2549 for (
unsigned i = 0, e = ArgLocs.
size();
i != e; ++
i) {
2552 if (Ins[
i].
Flags.isByVal()) {
2556 int Size = Ins[
i].Flags.getByValSize();
2557 unsigned NumRegs = (Size + 7) / 8;
2577 RC = &AArch64::GPR32RegClass;
2579 RC = &AArch64::GPR64RegClass;
2581 RC = &AArch64::FPR16RegClass;
2582 else if (RegVT == MVT::f32)
2583 RC = &AArch64::FPR32RegClass;
2585 RC = &AArch64::FPR64RegClass;
2587 RC = &AArch64::FPR128RegClass;
2611 assert(RegVT == Ins[
i].VT &&
"incorrect register location selected");
2624 !Ins[
i].Flags.isInConsecutiveRegs())
2625 BEAlign = 8 - ArgSize;
2655 ExtType, DL, VA.
getLocVT(), Chain, FIN,
2670 saveVarArgRegisters(CCInfo, DAG, DL, Chain);
2674 unsigned StackOffset = CCInfo.getNextStackOffset();
2676 StackOffset = ((StackOffset + 7) & ~7);
2680 unsigned StackArgSize = CCInfo.getNextStackOffset();
2682 if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
2686 StackArgSize =
alignTo(StackArgSize, 16);
2703 void AArch64TargetLowering::saveVarArgRegisters(
CCState &CCInfo,
2715 AArch64::X3, AArch64::X4, AArch64::X5,
2716 AArch64::X6, AArch64::X7 };
2717 static const unsigned NumGPRArgRegs =
array_lengthof(GPRArgRegs);
2720 unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
2722 if (GPRSaveSize != 0) {
2727 for (
unsigned i = FirstVariadicGPR;
i < NumGPRArgRegs; ++
i) {
2728 unsigned VReg = MF.
addLiveIn(GPRArgRegs[
i], &AArch64::GPR64RegClass);
2733 MemOps.push_back(Store);
2743 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
2744 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
2745 static const unsigned NumFPRArgRegs =
array_lengthof(FPRArgRegs);
2748 unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
2750 if (FPRSaveSize != 0) {
2755 for (
unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++
i) {
2756 unsigned VReg = MF.
addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
2762 MemOps.push_back(Store);
2771 if (!MemOps.empty()) {
2778 SDValue AArch64TargetLowering::LowerCallResult(
2784 ? RetCC_AArch64_WebKit_JS
2785 : RetCC_AArch64_AAPCS;
2793 for (
unsigned i = 0; i != RVLocs.
size(); ++
i) {
2798 if (i == 0 && isThisReturn) {
2800 "unexpected return calling convention register assignment");
2843 bool AArch64TargetLowering::isEligibleForTailCallOptimization(
2854 bool CCMatch = CallerCC == CalleeCC;
2862 if (i->hasByValAttr())
2890 "Unexpected variadic calling convention");
2893 if (isVarArg && !Outs.
empty()) {
2901 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2905 if (!ArgLoc.isRegLoc())
2919 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2928 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2946 SDValue AArch64TargetLowering::addTokenForArgument(
SDValue Chain,
2949 int ClobberedFI)
const {
2952 int64_t LastByte = FirstByte + MFI.
getObjectSize(ClobberedFI) - 1;
2965 if (FI->getIndex() < 0) {
2967 int64_t InLastByte = InFirstByte;
2970 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
2971 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
2979 bool AArch64TargetLowering::DoesCalleeRestoreStack(
CallingConv::ID CallCC,
2980 bool TailCallOpt)
const {
2987 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
2996 bool &IsTailCall = CLI.IsTailCall;
2998 bool IsVarArg = CLI.IsVarArg;
3001 bool IsThisReturn =
false;
3005 bool IsSibCall =
false;
3009 IsTailCall = isEligibleForTailCallOptimization(
3010 Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
3011 if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
3013 "site marked musttail");
3017 if (!TailCallOpt && IsTailCall)
3032 unsigned NumArgs = Outs.
size();
3034 for (
unsigned i = 0; i != NumArgs; ++
i) {
3035 MVT ArgVT = Outs[
i].VT;
3040 assert(!Res &&
"Call operand has unhandled type");
3050 unsigned NumArgs = Outs.
size();
3051 for (
unsigned i = 0; i != NumArgs; ++
i) {
3052 MVT ValVT = Outs[
i].VT;
3055 CLI.getArgs()[Outs[
i].OrigArgIndex].Ty,
3067 assert(!Res &&
"Call operand has unhandled type");
3088 if (IsTailCall && !IsSibCall) {
3093 NumBytes =
alignTo(NumBytes, 16);
3098 FPDiff = NumReusableBytes - NumBytes;
3105 assert(FPDiff % 16 == 0 &&
"unaligned stack on tail call");
3123 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size(); i != e;
3124 ++
i, ++realArgIdx) {
3126 SDValue Arg = OutVals[realArgIdx];
3142 if (Outs[realArgIdx].ArgVT ==
MVT::i1) {
3161 "unexpected calling convention register assignment");
3163 "unexpected use of 'returned'");
3164 IsThisReturn =
true;
3178 OpSize = (OpSize + 7) / 8;
3182 BEAlign = 8 - OpSize;
3185 int32_t
Offset = LocMemOffset + BEAlign;
3190 Offset = Offset + FPDiff;
3200 Chain = addTokenForArgument(Chain, DAG, MF.
getFrameInfo(), FI);
3209 if (Outs[i].Flags.
isByVal()) {
3213 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.
getByValAlign(),
3233 if (!MemOpChains.
empty())
3239 for (
auto &RegToPass : RegsToPass) {
3241 RegToPass.second, InFlag);
3253 if (InternalLinkage)
3261 dyn_cast<ExternalSymbolSDNode>(Callee)) {
3262 const char *Sym = S->getSymbol();
3270 const char *Sym = S->getSymbol();
3278 if (IsTailCall && !IsSibCall) {
3284 std::vector<SDValue> Ops;
3285 Ops.push_back(Chain);
3286 Ops.push_back(Callee);
3297 for (
auto &RegToPass : RegsToPass)
3299 RegToPass.second.getValueType()));
3308 IsThisReturn =
false;
3314 assert(Mask &&
"Missing call preserved mask for calling convention");
3318 Ops.push_back(InFlag);
3333 uint64_t CalleePopBytes =
3334 DoesCalleeRestoreStack(CallConv, TailCallOpt) ?
alignTo(NumBytes, 16) : 0;
3344 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
3345 InVals, IsThisReturn,
3346 IsThisReturn ? OutVals[0] :
SDValue());
3349 bool AArch64TargetLowering::CanLowerReturn(
3353 ? RetCC_AArch64_WebKit_JS
3354 : RetCC_AArch64_AAPCS;
3356 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
3367 ? RetCC_AArch64_WebKit_JS
3368 : RetCC_AArch64_AAPCS;
3377 for (
unsigned i = 0, realRVLocIdx = 0; i != RVLocs.
size();
3378 ++
i, ++realRVLocIdx) {
3381 SDValue Arg = OutVals[realRVLocIdx];
3387 if (Outs[i].ArgVT ==
MVT::i1) {
3409 if (AArch64::GPR64RegClass.
contains(*I))
3411 else if (AArch64::FPR64RegClass.
contains(*I))
3422 RetOps.push_back(Flag);
3437 unsigned char OpFlags =
3441 "unexpected offset in global node");
3501 AArch64TargetLowering::LowerDarwinGlobalTLSAddress(
SDValue Op,
3507 const GlobalValue *GV = cast<GlobalAddressSDNode>(
Op)->getGlobal();
3562 SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(
SDValue SymAddr,
3578 AArch64TargetLowering::LowerELFGlobalTLSAddress(
SDValue Op,
3582 "ELF TLS only supported in small memory model");
3647 TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
3671 TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
3681 return LowerDarwinGlobalTLSAddress(Op, DAG);
3683 return LowerELFGlobalTLSAddress(Op, DAG);
3717 "Unexpected condition code.");
3830 else if (SrcVT.
bitsGT(VT))
3840 EltMask = 0x80000000ULL;
3896 Attribute::NoImplicitFloat))
3931 return LowerVSETCC(Op, DAG);
3951 "Unexpected setcc expansion!");
4059 }
else if (CTVal && CFVal) {
4060 const int64_t TrueVal = CTVal->getSExtValue();
4067 if (TrueVal == ~FalseVal) {
4069 }
else if (TrueVal == -FalseVal) {
4077 const uint32_t TrueVal32 = CTVal->getZExtValue();
4080 if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
4083 if (TrueVal32 > FalseVal32) {
4088 }
else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
4091 if (TrueVal > FalseVal) {
4123 else if (CFVal && CFVal == RHSVal && AArch64CC ==
AArch64CC::NE)
4126 assert (CTVal && CFVal &&
"Expected constant operands for CSNEG.");
4141 return DAG.
getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
4159 if (RHSVal && RHSVal->
isZero()) {
4167 CFVal && CFVal->
isZero() &&
4196 return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
4231 CC = cast<CondCodeSDNode>(CCVal->getOperand(2))->
get();
4234 RHS = DAG.
getConstant(0, DL, CCVal.getValueType());
4237 return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
4311 const BlockAddress *BA = cast<BlockAddressSDNode>(
Op)->getBlockAddress();
4340 const Value *SV = cast<SrcValueSDNode>(Op.
getOperand(2))->getValue();
4356 const Value *SV = cast<SrcValueSDNode>(Op.
getOperand(2))->getValue();
4416 return Subtarget->
isTargetDarwin() ? LowerDarwin_VASTART(Op, DAG)
4417 : LowerAAPCS_VASTART(Op, DAG);
4426 const Value *DestSV = cast<SrcValueSDNode>(Op.
getOperand(3))->getValue();
4427 const Value *SrcSV = cast<SrcValueSDNode>(Op.
getOperand(4))->getValue();
4438 "automatic va_arg instruction only works on Darwin");
4440 const Value *V = cast<SrcValueSDNode>(Op.
getOperand(2))->getValue();
4452 assert(((Align & (Align - 1)) == 0) &&
"Expected Align to be a power of 2");
4468 bool NeedFPTrunc =
false;
4504 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4515 unsigned AArch64TargetLowering::getRegisterByName(
const char* RegName,
EVT VT,
4518 .Case(
"sp", AArch64::SP)
4534 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4536 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4544 unsigned Reg = MF.
addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
4574 HiBitsForLo, CCVal, Cmp);
4586 SDValue LoForBigShift = DAG.
getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4588 LoForNormalShift, CCVal, Cmp);
4592 SDValue HiForNormalShift = DAG.
getNode(Opc, dl, VT, ShOpHi, ShAmt);
4595 ? DAG.
getNode(Opc, dl, VT, ShOpHi,
4599 HiForNormalShift, CCVal, Cmp);
4611 unsigned VTBits = VT.getSizeInBits();
4629 LoBitsForHi, CCVal, Cmp);
4643 HiForNormalShift, CCVal, Cmp);
4650 LoForNormalShift, CCVal, Cmp);
4670 else if (VT == MVT::f32)
4687 if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
4693 ExtraSteps = VT ==
MVT::f64 ? 3 : 2;
4695 return DAG.
getNode(Opcode,
SDLoc(Operand), VT, Operand);
4705 bool Reciprocal)
const {
4707 (Enabled == ReciprocalEstimate::Unspecified && Subtarget->
useRSqrt()))
4718 for (
int i = ExtraSteps; i > 0; --
i) {
4734 VT,
Eq, Operand, Estimate);
4746 int &ExtraSteps)
const {
4758 for (
int i = ExtraSteps; i > 0; --
i) {
4798 const char *AArch64TargetLowering::LowerXConstraint(
EVT ConstraintVT)
const {
4823 AArch64TargetLowering::getConstraintType(
StringRef Constraint)
const {
4824 if (Constraint.
size() == 1) {
4825 switch (Constraint[0]) {
4846 AArch64TargetLowering::getSingleConstraintMatchWeight(
4847 AsmOperandInfo &
info,
const char *constraint)
const {
4849 Value *CallOperandVal = info.CallOperandVal;
4852 if (!CallOperandVal)
4856 switch (*constraint) {
4872 std::pair<unsigned, const TargetRegisterClass *>
4873 AArch64TargetLowering::getRegForInlineAsmConstraint(
4875 if (Constraint.
size() == 1) {
4876 switch (Constraint[0]) {
4879 return std::make_pair(0U, &AArch64::GPR64commonRegClass);
4880 return std::make_pair(0U, &AArch64::GPR32commonRegClass);
4883 return std::make_pair(0U, &AArch64::FPR16RegClass);
4885 return std::make_pair(0U, &AArch64::FPR32RegClass);
4887 return std::make_pair(0U, &AArch64::FPR64RegClass);
4889 return std::make_pair(0U, &AArch64::FPR128RegClass);
4895 return std::make_pair(0U, &AArch64::FPR128_loRegClass);
4900 return std::make_pair(
unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
4904 std::pair<unsigned, const TargetRegisterClass *> Res;
4909 unsigned Size = Constraint.
size();
4910 if ((Size == 4 || Size == 5) && Constraint[0] ==
'{' &&
4911 tolower(Constraint[1]) ==
'v' && Constraint[Size - 1] ==
'}') {
4914 if (!Failed && RegNo >= 0 && RegNo <= 31) {
4919 Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
4920 Res.second = &AArch64::FPR64RegClass;
4922 Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
4923 Res.second = &AArch64::FPR128RegClass;
4934 void AArch64TargetLowering::LowerAsmOperandForConstraint(
4935 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
4940 if (Constraint.length() != 1)
4943 char ConstraintLetter = Constraint[0];
4944 switch (ConstraintLetter) {
4974 switch (ConstraintLetter) {
4982 if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
4987 if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
5019 if ((CVal & 0xFFFF) == CVal)
5021 if ((CVal & 0xFFFF0000ULL) == CVal)
5024 if ((NCVal & 0xFFFFULL) == NCVal)
5026 if ((NCVal & 0xFFFF0000ULL) == NCVal)
5033 if ((CVal & 0xFFFFULL) == CVal)
5035 if ((CVal & 0xFFFF0000ULL) == CVal)
5037 if ((CVal & 0xFFFF00000000ULL) == CVal)
5039 if ((CVal & 0xFFFF000000000000ULL) == CVal)
5041 uint64_t NCVal = ~CVal;
5042 if ((NCVal & 0xFFFFULL) == NCVal)
5044 if ((NCVal & 0xFFFF0000ULL) == NCVal)
5046 if ((NCVal & 0xFFFF00000000ULL) == NCVal)
5048 if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
5062 Ops.push_back(Result);
5114 struct ShuffleSourceInfo {
5129 ShuffleSourceInfo(
SDValue Vec)
5130 : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
5131 ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}
5139 for (
unsigned i = 0; i < NumElts; ++
i) {
5157 unsigned EltNo = cast<ConstantSDNode>(V.
getOperand(1))->getZExtValue();
5164 if (Sources.
size() > 2)
5170 for (
auto &
Source : Sources) {
5171 EVT SrcEltTy =
Source.Vec.getValueType().getVectorElementType();
5172 if (SrcEltTy.
bitsLT(SmallestEltTy)) {
5173 SmallestEltTy = SrcEltTy;
5176 unsigned ResMultiplier =
5184 for (
auto &Src : Sources) {
5185 EVT SrcVT = Src.ShuffleVec.getValueType();
5202 DAG.
getUNDEF(Src.ShuffleVec.getValueType()));
5208 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
5213 if (Src.MinElt >= NumSrcElts) {
5218 Src.WindowBase = -NumSrcElts;
5219 }
else if (Src.MaxElt < NumSrcElts) {
5237 Src.WindowBase = -Src.MinElt;
5244 for (
auto &Src : Sources) {
5246 if (SrcEltTy == SmallestEltTy)
5251 Src.WindowBase *= Src.WindowScale;
5256 for (
auto Src : Sources)
5257 assert(Src.ShuffleVec.getValueType() == ShuffleVT);
5269 int EltNo = cast<ConstantSDNode>(Entry.
getOperand(1))->getSExtValue();
5277 int LanesDefined = BitsDefined / BitsPerShuffleLane;
5281 int *LaneMask = &Mask[i * ResMultiplier];
5283 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
5284 ExtractBase += NumElts * (Src - Sources.begin());
5285 for (
int j = 0; j < LanesDefined; ++j)
5286 LaneMask[j] = ExtractBase + j;
5294 for (
unsigned i = 0; i < Sources.size(); ++
i)
5295 ShuffleOps[i] = Sources[i].ShuffleVec;
5298 ShuffleOps[1], Mask);
5316 unsigned ExpectedElt = Imm;
5317 for (
unsigned i = 1; i < NumElts; ++
i) {
5321 if (ExpectedElt == NumElts)
5326 if (ExpectedElt != static_cast<unsigned>(M[i]))
5338 const int *FirstRealElt =
find_if(M, [](
int Elt) {
return Elt >= 0; });
5343 APInt ExpectedElt =
APInt(MaskBits, *FirstRealElt + 1);
5347 [&](
int Elt) {
return Elt != ExpectedElt++ && Elt != -1;});
5348 if (FirstWrongElt != M.
end())
5377 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
5378 "Only possible block sizes for REV are: 16, 32, 64");
5385 unsigned BlockElts = M[0] + 1;
5388 BlockElts = BlockSize / EltSz;
5390 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
5393 for (
unsigned i = 0; i < NumElts; ++
i) {
5396 if ((
unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
5405 WhichResult = (M[0] == 0 ? 0 : 1);
5406 unsigned Idx = WhichResult * NumElts / 2;
5407 for (
unsigned i = 0; i != NumElts; i += 2) {
5408 if ((M[i] >= 0 && (
unsigned)M[i] != Idx) ||
5409 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != Idx + NumElts))
5419 WhichResult = (M[0] == 0 ? 0 : 1);
5420 for (
unsigned i = 0; i != NumElts; ++
i) {
5423 if ((
unsigned)M[
i] != 2 * i + WhichResult)
5432 WhichResult = (M[0] == 0 ? 0 : 1);
5433 for (
unsigned i = 0; i < NumElts; i += 2) {
5434 if ((M[i] >= 0 && (
unsigned)M[i] != i + WhichResult) ||
5435 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != i + NumElts + WhichResult))
5446 WhichResult = (M[0] == 0 ? 0 : 1);
5447 unsigned Idx = WhichResult * NumElts / 2;
5448 for (
unsigned i = 0; i != NumElts; i += 2) {
5449 if ((M[i] >= 0 && (
unsigned)M[i] != Idx) ||
5450 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != Idx))
5463 WhichResult = (M[0] == 0 ? 0 : 1);
5464 for (
unsigned j = 0; j != 2; ++j) {
5465 unsigned Idx = WhichResult;
5466 for (
unsigned i = 0; i != Half; ++
i) {
5467 int MIdx = M[i + j * Half];
5468 if (MIdx >= 0 && (
unsigned)MIdx != Idx)
5482 WhichResult = (M[0] == 0 ? 0 : 1);
5483 for (
unsigned i = 0; i < NumElts; i += 2) {
5484 if ((M[i] >= 0 && (
unsigned)M[i] != i + WhichResult) ||
5485 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != i + WhichResult))
5492 bool &DstIsLeft,
int &Anomaly) {
5493 if (M.
size() !=
static_cast<size_t>(NumInputElements))
5496 int NumLHSMatch = 0, NumRHSMatch = 0;
5497 int LastLHSMismatch = -1, LastRHSMismatch = -1;
5499 for (
int i = 0; i < NumInputElements; ++
i) {
5509 LastLHSMismatch =
i;
5511 if (M[i] == i + NumInputElements)
5514 LastRHSMismatch =
i;
5517 if (NumLHSMatch == NumInputElements - 1) {
5519 Anomaly = LastLHSMismatch;
5521 }
else if (NumRHSMatch == NumInputElements - 1) {
5523 Anomaly = LastRHSMismatch;
5536 for (
int I = 0,
E = NumElts / 2; I !=
E; I++) {
5541 int Offset = NumElts / 2;
5542 for (
int I = NumElts / 2,
E = NumElts; I !=
E; I++) {
5543 if (Mask[I] != I + SplitLHS * Offset)
5584 unsigned OpNum = (PFEntry >> 26) & 0x0F;
5585 unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
5586 unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
5606 if (OpNum == OP_COPY) {
5607 if (LHSID == (1 * 9 + 2) * 9 + 3)
5609 assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 &&
"Illegal OP_COPY!");
5643 else if (EltTy ==
MVT::i32 || EltTy == MVT::f32)
5653 return DAG.
getNode(Opcode, dl, VT, OpLHS, Lane);
5658 unsigned Imm = (OpNum - OP_VEXT1 + 1) *
getExtFactor(OpLHS);
5694 for (
int Val : ShuffleMask) {
5695 for (
unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
5696 unsigned Offset = Byte + Val * BytesPerElt;
5702 unsigned IndexLen = 8;
5721 if (IndexLen == 8) {
5750 if (EltType ==
MVT::i32 || EltType == MVT::f32)
5796 Lane += cast<ConstantSDNode>(V1.
getOperand(1))->getZExtValue();
5815 bool ReverseEXT =
false;
5817 if (
isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
5829 unsigned WhichResult;
5830 if (
isZIPMask(ShuffleMask, VT, WhichResult)) {
5832 return DAG.
getNode(Opc, dl, V1.getValueType(), V1,
V2);
5834 if (
isUZPMask(ShuffleMask, VT, WhichResult)) {
5836 return DAG.
getNode(Opc, dl, V1.getValueType(), V1,
V2);
5838 if (
isTRNMask(ShuffleMask, VT, WhichResult)) {
5840 return DAG.
getNode(Opc, dl, V1.getValueType(), V1,
V2);
5845 return DAG.
getNode(Opc, dl, V1.getValueType(), V1, V1);
5849 return DAG.
getNode(Opc, dl, V1.getValueType(), V1, V1);
5853 return DAG.
getNode(Opc, dl, V1.getValueType(), V1, V1);
5862 if (
isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
5867 int SrcLane = ShuffleMask[Anomaly];
5868 if (SrcLane >= NumInputElements) {
5889 unsigned PFIndexes[4];
5890 for (
unsigned i = 0; i != 4; ++
i) {
5891 if (ShuffleMask[i] < 0)
5894 PFIndexes[
i] = ShuffleMask[
i];
5898 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
5899 PFIndexes[2] * 9 + PFIndexes[3];
5901 unsigned Cost = (PFEntry >> 30);
5913 APInt SplatBits, SplatUndef;
5914 unsigned SplatBitSize;
5916 if (BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
5919 for (
unsigned i = 0; i < NumSplats; ++
i) {
5920 CnstBits <<= SplatBitSize;
5921 UndefBits <<= SplatBitSize;
5923 UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.
getSizeInBits());
5947 CnstBits = ~CnstBits;
5951 bool SecondTry =
false;
5954 if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
6016 CnstBits = ~UndefBits;
6029 uint64_t &ConstVal) {
6038 for (
unsigned i = 1; i < NumElts; ++
i)
6039 if (dyn_cast<ConstantSDNode>(Bvec->
getOperand(i)) != FirstElt)
6051 unsigned IID = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
6099 if (C2 > ElemSizeInBits)
6101 unsigned ElemMask = (1 << ElemSizeInBits) - 1;
6102 if ((C1 & ElemMask) != (~C2 & ElemMask))
6109 IsShiftRight ? Intrinsic::aarch64_neon_vsri : Intrinsic::aarch64_neon_vsli;
6115 DEBUG(
dbgs() <<
"aarch64-lower: transformed: \n");
6151 bool SecondTry =
false;
6154 if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
6155 CnstBits = CnstBits.zextOrTrunc(64);
6156 uint64_t CnstVal = CnstBits.getZExtValue();
6216 CnstBits = UndefBits;
6239 if (
auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) {
6241 CstLane->getZExtValue());
6261 bool SecondTry =
false;
6264 if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
6265 CnstBits = CnstBits.zextOrTrunc(64);
6266 uint64_t CnstVal = CnstBits.getZExtValue();
6272 if (VT.
isInteger() && (CnstVal == 0 || CnstVal == ~0ULL))
6465 CnstBits = UndefBits;
6482 bool isOnlyLowElement =
true;
6483 bool usesOnlyOneValue =
true;
6484 bool usesOnlyOneConstantValue =
true;
6485 bool isConstant =
true;
6486 unsigned NumConstantLanes = 0;
6489 for (
unsigned i = 0; i < NumElts; ++
i) {
6494 isOnlyLowElement =
false;
6495 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6498 if (isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V)) {
6502 else if (ConstantValue != V)
6503 usesOnlyOneConstantValue =
false;
6508 else if (V != Value)
6509 usesOnlyOneValue =
false;
6515 if (isOnlyLowElement)
6520 if (usesOnlyOneValue) {
6535 return DAG.
getNode(Opcode, dl, VT, Value, Lane);
6542 "Unsupported floating-point vector type");
6544 for (
unsigned i = 0; i < NumElts; ++
i)
6548 Val = LowerBUILD_VECTOR(Val, DAG);
6558 if (NumConstantLanes > 0 && usesOnlyOneConstantValue) {
6561 for (
unsigned i = 0; i < NumElts; ++
i) {
6564 if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V)) {
6591 if (!isConstant && !usesOnlyOneValue) {
6603 (ElemSize == 32 || ElemSize == 64)) {
6604 unsigned SubIdx = ElemSize == 32 ? AArch64::ssub : AArch64::dsub;
6606 DAG.
getMachineNode(TargetOpcode::INSERT_SUBREG, dl, VT, Vec, Op0,
6611 for (; i < NumElts; ++
i) {
6625 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(
SDValue Op,
6632 if (!CI || CI->
getZExtValue() >= VT.getVectorNumElements())
6659 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(
SDValue Op,
6666 if (!CI || CI->
getZExtValue() >= VT.getVectorNumElements())
6695 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(
SDValue Op,
6726 unsigned PFIndexes[4];
6727 for (
unsigned i = 0; i != 4; ++
i) {
6731 PFIndexes[
i] = M[
i];
6735 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
6736 PFIndexes[2] * 9 + PFIndexes[3];
6738 unsigned Cost = (PFEntry >> 30);
6746 unsigned DummyUnsigned;
6750 isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
6769 APInt SplatBits, SplatUndef;
6770 unsigned SplatBitSize;
6772 if (!BVN || !BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
6773 HasAnyUndefs, ElementBits) ||
6774 SplatBitSize > ElementBits)
6785 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6789 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
6796 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6800 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
6803 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(
SDValue Op,
6839 : Intrinsic::aarch64_neon_ushl;
6846 return NegShiftLeft;
6857 "function only supposed to emit natural comparisons");
6863 bool IsZero = IsCnst && (CnstBits == 0);
7003 unsigned Intrinsic)
const {
7005 switch (Intrinsic) {
7006 case Intrinsic::aarch64_neon_ld2:
7007 case Intrinsic::aarch64_neon_ld3:
7008 case Intrinsic::aarch64_neon_ld4:
7009 case Intrinsic::aarch64_neon_ld1x2:
7010 case Intrinsic::aarch64_neon_ld1x3:
7011 case Intrinsic::aarch64_neon_ld1x4:
7012 case Intrinsic::aarch64_neon_ld2lane:
7013 case Intrinsic::aarch64_neon_ld3lane:
7014 case Intrinsic::aarch64_neon_ld4lane:
7015 case Intrinsic::aarch64_neon_ld2r:
7016 case Intrinsic::aarch64_neon_ld3r:
7017 case Intrinsic::aarch64_neon_ld4r: {
7020 uint64_t NumElts = DL.getTypeSizeInBits(I.
getType()) / 64;
7030 case Intrinsic::aarch64_neon_st2:
7031 case Intrinsic::aarch64_neon_st3:
7032 case Intrinsic::aarch64_neon_st4:
7033 case Intrinsic::aarch64_neon_st1x2:
7034 case Intrinsic::aarch64_neon_st1x3:
7035 case Intrinsic::aarch64_neon_st1x4:
7036 case Intrinsic::aarch64_neon_st2lane:
7037 case Intrinsic::aarch64_neon_st3lane:
7038 case Intrinsic::aarch64_neon_st4lane: {
7041 unsigned NumElts = 0;
7046 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
7057 case Intrinsic::aarch64_ldaxr:
7058 case Intrinsic::aarch64_ldxr: {
7070 case Intrinsic::aarch64_stlxr:
7071 case Intrinsic::aarch64_stxr: {
7083 case Intrinsic::aarch64_ldaxp:
7084 case Intrinsic::aarch64_ldxp:
7094 case Intrinsic::aarch64_stlxp:
7095 case Intrinsic::aarch64_stxp:
7118 return NumBits1 > NumBits2;
7125 return NumBits1 > NumBits2;
7132 if (I->
getOpcode() != Instruction::FMul)
7141 !(User->
getOpcode() == Instruction::FSub ||
7142 User->
getOpcode() == Instruction::FAdd))
7162 return NumBits1 == 32 && NumBits2 == 64;
7169 return NumBits1 == 32 && NumBits2 == 64;
7187 bool AArch64TargetLowering::isExtFreeImpl(
const Instruction *
Ext)
const {
7188 if (isa<FPExtInst>(Ext))
7195 for (
const Use &U : Ext->
uses()) {
7200 const Instruction *Instr = cast<Instruction>(U.getUser());
7204 case Instruction::Shl:
7208 case Instruction::GetElementPtr: {
7221 if (ShiftAmt == 0 || ShiftAmt > 4)
7225 case Instruction::Trunc:
7242 unsigned &RequiredAligment)
const {
7247 RequiredAligment = 0;
7249 return NumBits == 32 || NumBits == 64;
7267 "Invalid interleave factor");
7268 assert(!Shuffles.
empty() &&
"Empty shufflevector input");
7270 "Unmatched number of shufflevectors and indices");
7275 unsigned VecSize = DL.getTypeSizeInBits(VecTy);
7278 if (!Subtarget->
hasNEON() || (VecSize != 64 && VecSize != 128))
7289 Type *Tys[2] = {VecTy, PtrTy};
7290 static const Intrinsic::ID LoadInts[3] = {Intrinsic::aarch64_neon_ld2,
7291 Intrinsic::aarch64_neon_ld3,
7292 Intrinsic::aarch64_neon_ld4};
7299 CallInst *LdN = Builder.CreateCall(LdNFunc, Ptr,
"ldN");
7303 for (
unsigned i = 0; i < Shuffles.
size(); i++) {
7305 unsigned Index = Indices[
i];
7307 Value *SubVec = Builder.CreateExtractValue(LdN, Index);
7311 SubVec = Builder.CreateIntToPtr(SubVec, SVI->
getType());
7325 for (
unsigned i = 0; i < NumElts; i++)
7359 unsigned Factor)
const {
7361 "Invalid interleave factor");
7365 "Invalid interleaved store");
7375 if (!Subtarget->
hasNEON() || (SubVecSize != 64 && SubVecSize != 128))
7386 unsigned NumOpElts =
7398 Type *Tys[2] = {SubVecTy, PtrTy};
7399 static const Intrinsic::ID StoreInts[3] = {Intrinsic::aarch64_neon_st2,
7400 Intrinsic::aarch64_neon_st3,
7401 Intrinsic::aarch64_neon_st4};
7409 for (
unsigned i = 0; i < Factor; i++) {
7414 unsigned StartMask = 0;
7415 for (
unsigned j = 1; j < LaneLen; j++) {
7416 if (Mask[j*Factor + i] >= 0) {
7417 StartMask = Mask[j*Factor +
i] - j;
7437 unsigned AlignCheck) {
7438 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
7439 (DstAlign == 0 || DstAlign % AlignCheck == 0));
7443 unsigned SrcAlign,
bool IsMemset,
7452 if (Subtarget->
hasFPARMv8() && !IsMemset && Size >= 16 &&
7478 return ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
7491 unsigned AS)
const {
7509 uint64_t NumBytes = 0;
7512 NumBytes = NumBits / 8;
7521 if (isInt<9>(Offset))
7525 unsigned shift =
Log2_64(NumBytes);
7526 if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
7528 (Offset >> shift) << shift == Offset)
7535 return AM.
Scale == 1 || (AM.
Scale > 0 && (uint64_t)AM.
Scale == NumBytes);
7540 unsigned AS)
const {
7577 static const MCPhysReg ScratchRegs[] = {
7578 AArch64::X16, AArch64::X17, AArch64::LR, 0
7611 if ((int64_t)Val < 0)
7614 Val &= (1LL << 32) - 1;
7617 unsigned Shift = (63 - LZ) / 16;
7643 if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
7690 AArch64TargetLowering::BuildSDIVPow2(
SDNode *N,
const APInt &Divisor,
7692 std::vector<SDNode *> *Created)
const {
7700 !(Divisor.
isPowerOf2() || (-Divisor).isPowerOf2()))
7716 Created->push_back(Cmp.
getNode());
7717 Created->push_back(Add.
getNode());
7718 Created->push_back(CSel.
getNode());
7731 Created->push_back(SRA.
getNode());
7761 unsigned TrailingZeroes = ConstValue.countTrailingZeros();
7762 if (TrailingZeroes) {
7776 APInt ShiftedConstValue = ConstValue.
ashr(TrailingZeroes);
7778 unsigned ShiftAmt, AddSubOpc;
7780 bool ShiftValUseIsN0 =
true;
7782 bool NegateResult =
false;
7784 if (ConstValue.isNonNegative()) {
7788 APInt SCVMinus1 = ShiftedConstValue - 1;
7789 APInt CVPlus1 = ConstValue + 1;
7801 APInt CVNegPlus1 = -ConstValue + 1;
7802 APInt CVNegMinus1 = -ConstValue - 1;
7806 ShiftValUseIsN0 =
false;
7810 NegateResult =
true;
7820 SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
7821 SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
7822 SDValue Res = DAG.
getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
7823 assert(!(NegateResult && TrailingZeroes) &&
7824 "NegateResult and TrailingZeroes cannot both be true for now.");
7861 if (!BV->isConstant())
7866 EVT IntVT = BV->getValueType(0);
7889 if (VT != MVT::f32 && VT !=
MVT::f64)
7934 if (!isa<BuildVectorSDNode>(ConstVec))
7939 if (FloatBits != 32 && FloatBits != 64)
7944 if (IntBits != 16 && IntBits != 32 && IntBits != 64)
7948 if (IntBits > FloatBits)
7953 int32_t
Bits = IntBits == 64 ? 64 : 32;
7955 if (C == -1 || C == 0 || C > Bits)
7975 "Illegal vector type after legalization");
7979 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs
7980 : Intrinsic::aarch64_neon_vcvtfp2fxu;
7986 if (IntBits < FloatBits)
8008 if (!isa<BuildVectorSDNode>(ConstVec))
8013 if (IntBits != 16 && IntBits != 32 && IntBits != 64)
8018 if (FloatBits != 32 && FloatBits != 64)
8022 if (IntBits > FloatBits)
8028 if (C == -1 || C == 0 || C > FloatBits)
8050 if (IntBits < FloatBits)
8054 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp
8055 : Intrinsic::aarch64_neon_vcvtfxu2fp;
8098 bool LHSFromHi =
false;
8104 bool RHSFromHi =
false;
8110 if (LHSFromHi == RHSFromHi)
8145 uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL <<
Bits) - 1);
8146 for (
int i = 1; i >= 0; --
i)
8147 for (
int j = 1; j >= 0; --j) {
8153 bool FoundMatch =
true;
8158 CN0->
getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
8207 if (VT ==
MVT::i32 && ShiftAmt == 16 &&
8210 if (VT ==
MVT::i64 && ShiftAmt == 32 &&
8247 uint64_t idx = cast<ConstantSDNode>(Op0->
getOperand(1))->getZExtValue();
8252 if (idx != AArch64::dsub)
8267 DEBUG(
dbgs() <<
"aarch64-lower: bitcast extract_subvector simplification\n");
8314 for (
size_t i = 0; i < Mask.size(); ++
i)
8355 DEBUG(
dbgs() <<
"aarch64-lower: concat_vectors bitcast simplification\n");
8397 "unexpected vector size on extract_vector_elt!");
8523 cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue());
8532 if (!TValue || !FValue)
8536 if (!TValue->
isOne()) {
8542 return TValue->
isOne() && FValue->isNullValue();
8668 "unexpected shape for long operation");
8691 int64_t ShiftAmount;
8693 APInt SplatValue, SplatUndef;
8694 unsigned SplatBitSize;
8697 HasAnyUndefs, ElemBits) ||
8698 SplatBitSize != ElemBits)
8703 ShiftAmount = CVN->getSExtValue();
8712 case Intrinsic::aarch64_neon_sqshl:
8714 IsRightShift =
false;
8716 case Intrinsic::aarch64_neon_uqshl:
8718 IsRightShift =
false;
8720 case Intrinsic::aarch64_neon_srshl:
8722 IsRightShift =
true;
8724 case Intrinsic::aarch64_neon_urshl:
8726 IsRightShift =
true;
8728 case Intrinsic::aarch64_neon_sqshlu:
8730 IsRightShift =
false;
8734 if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(
int)ElemBits) {
8738 }
else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
8781 case Intrinsic::aarch64_neon_vcvtfxs2fp:
8782 case Intrinsic::aarch64_neon_vcvtfxu2fp:
8784 case Intrinsic::aarch64_neon_saddv:
8786 case Intrinsic::aarch64_neon_uaddv:
8788 case Intrinsic::aarch64_neon_sminv:
8790 case Intrinsic::aarch64_neon_uminv:
8792 case Intrinsic::aarch64_neon_smaxv:
8794 case Intrinsic::aarch64_neon_umaxv:
8796 case Intrinsic::aarch64_neon_fmax:
8799 case Intrinsic::aarch64_neon_fmin:
8802 case Intrinsic::aarch64_neon_fmaxnm:
8805 case Intrinsic::aarch64_neon_fminnm:
8808 case Intrinsic::aarch64_neon_smull:
8809 case Intrinsic::aarch64_neon_umull:
8810 case Intrinsic::aarch64_neon_pmull:
8811 case Intrinsic::aarch64_neon_sqdmull:
8813 case Intrinsic::aarch64_neon_sqshl:
8814 case Intrinsic::aarch64_neon_uqshl:
8815 case Intrinsic::aarch64_neon_sqshlu:
8816 case Intrinsic::aarch64_neon_srshl:
8817 case Intrinsic::aarch64_neon_urshl:
8819 case Intrinsic::aarch64_crc32b:
8820 case Intrinsic::aarch64_crc32cb:
8822 case Intrinsic::aarch64_crc32h:
8823 case Intrinsic::aarch64_crc32ch:
8840 if (IID == Intrinsic::aarch64_neon_sabd ||
8841 IID == Intrinsic::aarch64_neon_uabd) {
8909 assert(!(NumElements & 1) &&
"Splitting vector, but not in half!");
8928 SDValue SplatVal,
unsigned NumVecElts) {
8943 unsigned Offset = EltOffset;
8944 while (--NumVecElts) {
8945 unsigned Alignment =
MinAlign(OrigAlignment, Offset);
8949 PtrInfo.getWithOffset(Offset), Alignment,
8951 Offset += EltOffset;
8978 if (!(((NumVecElts == 2 || NumVecElts == 3) &&
8980 ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) &&
8997 if (Offset < -512 || Offset > 504)
9001 for (
int I = 0; I < NumVecElts; ++
I) {
9031 if (NumVecElts != 4 && NumVecElts != 2)
9037 std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1);
9039 for (
unsigned I = 0; I < NumVecElts; ++
I) {
9055 if (IndexVal >= NumVecElts)
9057 IndexNotInserted.reset(IndexVal);
9062 if (IndexNotInserted.any())
9087 return ReplacedZeroSplat;
9118 return ReplacedSplat;
9151 unsigned LoadIdx = IsLaneOp ? 1 : 0;
9167 if (UI.getUse().getResNo() == 1)
9180 || UI.getUse().getResNo() != Addr.
getResNo())
9195 uint32_t IncVal = CInc->getZExtValue();
9197 if (IncVal != NumBytes)
9245 APInt KnownZero, KnownOne;
9249 if (TLI.SimplifyDemandedBits(Addr, DemandedMask, KnownZero, KnownOne, TLO)) {
9284 if (NumVecElts != 4)
9287 if (NumVecElts != 4 && NumVecElts != 8 && NumVecElts != 16)
9294 for (
int CurStep = 0; CurStep != NumExpectedSteps; ++CurStep) {
9309 if (CurOp.
getOpcode() != Op && (CurStep != (NumExpectedSteps - 1)))
9320 int NumMaskElts = 1 << CurStep;
9321 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Shuffle)->getMask();
9332 for (
int i = 0; i < NumVecElts; ++
i)
9333 if ((i < NumMaskElts && Mask[i] != (NumMaskElts + i)) ||
9334 (i >= NumMaskElts && !(Mask[i] < 0)))
9340 bool IsIntrinsic =
false;
9361 Opcode = Intrinsic::aarch64_neon_fmaxnmv;
9365 Opcode = Intrinsic::aarch64_neon_fminnmv;
9441 if (EltTy != MVT::f32)
9547 UI.getUse().getResNo() != Addr.
getResNo())
9556 bool IsStore =
false;
9557 bool IsLaneOp =
false;
9558 bool IsDupOp =
false;
9559 unsigned NewOpc = 0;
9560 unsigned NumVecs = 0;
9561 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
9571 NumVecs = 2; IsStore =
true;
break;
9573 NumVecs = 3; IsStore =
true;
break;
9575 NumVecs = 4; IsStore =
true;
break;
9583 NumVecs = 2; IsStore =
true;
break;
9585 NumVecs = 3; IsStore =
true;
break;
9587 NumVecs = 4; IsStore =
true;
break;
9589 NumVecs = 2; IsDupOp =
true;
break;
9591 NumVecs = 3; IsDupOp =
true;
break;
9593 NumVecs = 4; IsDupOp =
true;
break;
9595 NumVecs = 2; IsLaneOp =
true;
break;
9597 NumVecs = 3; IsLaneOp =
true;
break;
9599 NumVecs = 4; IsLaneOp =
true;
break;
9601 NumVecs = 2; IsStore =
true; IsLaneOp =
true;
break;
9603 NumVecs = 3; IsStore =
true; IsLaneOp =
true;
break;
9605 NumVecs = 4; IsStore =
true; IsLaneOp =
true;
break;
9617 uint32_t IncVal = CInc->getZExtValue();
9619 if (IsLaneOp || IsDupOp)
9621 if (IncVal != NumBytes)
9628 if (IsLaneOp || IsStore)
9629 for (
unsigned i = 2; i < AddrOpIdx; ++
i)
9636 unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
9638 for (n = 0; n < NumResultVecs; ++n)
9650 std::vector<SDValue> NewResults;
9651 for (
unsigned i = 0; i < NumResultVecs; ++
i) {
9652 NewResults.push_back(
SDValue(UpdN.getNode(),
i));
9654 NewResults.push_back(
SDValue(UpdN.getNode(), NumResultVecs + 1));
9775 int MaxUInt = (1 << width);
9783 AddConstant -= (1 << (width-1));
9788 if ((AddConstant == 0) ||
9789 (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
9790 (AddConstant >= 0 && CompConstant < 0) ||
9791 (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
9796 if ((AddConstant == 0) ||
9797 (AddConstant >= 0 && CompConstant <= 0) ||
9798 (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
9803 if ((AddConstant >= 0 && CompConstant < 0) ||
9804 (AddConstant <= 0 && CompConstant >= -1 &&
9805 CompConstant < AddConstant + MaxUInt))
9810 if ((AddConstant == 0) ||
9811 (AddConstant > 0 && CompConstant <= 0) ||
9812 (AddConstant < 0 && CompConstant <= AddConstant))
9817 if ((AddConstant >= 0 && CompConstant <= 0) ||
9818 (AddConstant <= 0 && CompConstant >= 0 &&
9819 CompConstant <= AddConstant + MaxUInt))
9824 if ((AddConstant > 0 && CompConstant < 0) ||
9825 (AddConstant < 0 && CompConstant >= 0 &&
9826 CompConstant < AddConstant + MaxUInt) ||
9827 (AddConstant >= 0 && CompConstant >= 0 &&
9828 CompConstant >= AddConstant) ||
9829 (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
9848 unsigned CmpIndex) {
9849 unsigned CC = cast<ConstantSDNode>(N->
getOperand(CCIndex))->getSExtValue();
9851 unsigned CondOpcode = SubsNode->
getOpcode();
9860 unsigned MaskBits = 0;
9869 else if (CNV == 65535)
9890 if (!isa<ConstantSDNode>(AddInputValue2.
getNode()) ||
9891 !isa<ConstantSDNode>(SubsInputValue.
getNode()))
9902 cast<ConstantSDNode>(AddInputValue2.
getNode())->getSExtValue(),
9903 cast<ConstantSDNode>(SubsInputValue.
getNode())->getSExtValue()))
9929 assert(isa<ConstantSDNode>(CCVal) &&
"Expected a ConstantSDNode here!");
9930 unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
9947 "Expected the value type to be the same for both operands!");
10015 (Bit - C->
getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
10024 if (Bit >= Op->getValueType(0).getSizeInBits())
10025 Bit = Op->getValueType(0).getSizeInBits() - 1;
10030 if ((Bit + C->
getZExtValue()) < Op->getValueType(0).getSizeInBits()) {
10048 unsigned Bit = cast<ConstantSDNode>(N->
getOperand(2))->getZExtValue();
10049 bool Invert =
false;
10053 if (TestSrc == NewTestSrc)
10096 cast<CondCodeSDNode>(N0.
getOperand(2))->
get());
10118 "Scalar-SETCC feeding SELECT has unexpected result type!");
10131 if (!ResVT.
isVector() || NumMaskElts == 0)
10239 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
10240 case Intrinsic::aarch64_neon_ld2:
10241 case Intrinsic::aarch64_neon_ld3:
10242 case Intrinsic::aarch64_neon_ld4:
10243 case Intrinsic::aarch64_neon_ld1x2:
10244 case Intrinsic::aarch64_neon_ld1x3:
10245 case Intrinsic::aarch64_neon_ld1x4:
10246 case Intrinsic::aarch64_neon_ld2lane:
10247 case Intrinsic::aarch64_neon_ld3lane:
10248 case Intrinsic::aarch64_neon_ld4lane:
10249 case Intrinsic::aarch64_neon_ld2r:
10250 case Intrinsic::aarch64_neon_ld3r:
10251 case Intrinsic::aarch64_neon_ld4r:
10252 case Intrinsic::aarch64_neon_st2:
10253 case Intrinsic::aarch64_neon_st3:
10254 case Intrinsic::aarch64_neon_st4:
10255 case Intrinsic::aarch64_neon_st1x2:
10256 case Intrinsic::aarch64_neon_st1x3:
10257 case Intrinsic::aarch64_neon_st1x4:
10258 case Intrinsic::aarch64_neon_st2lane:
10259 case Intrinsic::aarch64_neon_st3lane:
10260 case Intrinsic::aarch64_neon_st4lane:
10273 bool AArch64TargetLowering::isUsedByReturnOnly(
SDNode *N,
10292 bool HasRet =
false;
10310 bool AArch64TargetLowering::mayBeEmittedAsTailCall(
CallInst *CI)
const {
10314 bool AArch64TargetLowering::getIndexedAddressParts(
SDNode *Op,
SDValue &Base,
10326 int64_t RHSC = RHS->getSExtValue();
10328 RHSC = -(uint64_t)RHSC;
10329 if (!isInt<9>(RHSC))
10338 bool AArch64TargetLowering::getPreIndexedAddressParts(
SDNode *N,
SDValue &Base,
10344 if (
LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
10345 VT = LD->getMemoryVT();
10346 Ptr = LD->getBasePtr();
10348 VT =
ST->getMemoryVT();
10349 Ptr =
ST->getBasePtr();
10354 if (!getIndexedAddressParts(Ptr.
getNode(), Base,
Offset, AM, IsInc, DAG))
10360 bool AArch64TargetLowering::getPostIndexedAddressParts(
10365 if (
LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
10366 VT = LD->getMemoryVT();
10367 Ptr = LD->getBasePtr();
10369 VT =
ST->getMemoryVT();
10370 Ptr =
ST->getBasePtr();
10375 if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
10405 unsigned AcrossOp) {
10422 return std::make_pair(Lo, Hi);
10429 "AtomicCmpSwap on types less than 128 should be legal");
10435 AArch64::CMP_SWAP_128,
SDLoc(N),
10440 MemOp[0] = cast<MemSDNode>(
N)->getMemOperand();
10441 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
10448 void AArch64TargetLowering::ReplaceNodeResults(
10491 unsigned AArch64TargetLowering::combineRepeatedFPDivisors()
const {
10514 return Size == 128;
10546 Type *ValTy = cast<PointerType>(Addr->
getType())->getElementType();
10554 IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
10558 Value *LoHi = Builder.
CreateCall(Ldxr, Addr,
"lohi");
10570 IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
10575 cast<PointerType>(Addr->
getType())->getElementType());
10585 Value *Val, Value *Addr,
10595 IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
10599 Value *Lo = Builder.
CreateTrunc(Val, Int64Ty,
"lo");
10606 IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
10616 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
10621 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(
LLVMContext &,
10633 const unsigned TlsOffset = 0x28;
10649 const unsigned TlsOffset = 0x48;
10677 if (AArch64::GPR64RegClass.
contains(*I))
10678 RC = &AArch64::GPR64RegClass;
10679 else if (AArch64::FPR64RegClass.
contains(*I))
10680 RC = &AArch64::FPR64RegClass;
10691 Attribute::NoUnwind) &&
10692 "Function should be nounwind in insertCopiesSplitCSR!");
10698 for (
auto *Exit : Exits)
10700 TII->
get(TargetOpcode::COPY), *
I)
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static bool isAdvSIMDModImmType6(uint64_t Imm)
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
void setFrameAddressIsTaken(bool T)
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
std::enable_if< std::numeric_limits< T >::is_signed, bool >::type getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
constexpr bool isUInt< 32 >(uint64_t x)
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
APInt ashr(unsigned shiftAmt) const
Arithmetic right-shift function.
Value * getValueOperand()
Helper structure to keep track of SetCC information.
static MVT getIntegerVT(unsigned BitWidth)
static bool isUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
void push_back(const T &Elt)
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
A parsed version of the target data layout string in and methods for querying it. ...
Type * getIndexedType() const
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address...
Value * CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name="")
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG)
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
iterator_range< use_iterator > uses()
bool isTargetAndroid() const
bool requiresStrictAlign() const
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
Flags getFlags() const
Return the raw flags of the source value,.
LLVMContext * getContext() const
static uint8_t encodeAdvSIMDModImmType3(uint64_t Imm)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
uint64_t getZExtValue() const
Get zero extended value.
DiagnosticInfoOptimizationBase::Argument NV
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
void setVarArgsGPRSize(unsigned Size)
unsigned getVarArgsFPRSize() const
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG)
static SDValue performNEONPostLDSTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Target-specific DAG combine function for NEON load/store intrinsics to merge base address updates...
int getVarArgsStackIndex() const
STATISTIC(NumFunctions,"Total number of functions")
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
BR_CC - Conditional branch.
LocInfo getLocInfo() const
static bool isUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
static bool isTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC) const
Selects the correct CCAssignFn for a given CallingConvention value.
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool hasOneUse() const
Return true if there is exactly one use of this node.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
A Module instance is used to store all the information related to an LLVM module. ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
static SDValue tryCombineToEXTR(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
EXTR instruction extracts a contiguous chunk of bits from two existing registers viewed as a high/low...
static bool isAdvSIMDModImmType12(uint64_t Imm)
const TargetMachine & getTargetMachine() const
an instruction that atomically checks whether a specified value is in a memory location, and, if it is, stores a new value there.
static bool isAdvSIMDModImmType4(uint64_t Imm)
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static uint8_t encodeAdvSIMDModImmType1(uint64_t Imm)
bool isCalledByLegalizer() const
static bool isConjunctionDisjunctionTree(const SDValue Val, bool &CanNegate, unsigned Depth=0)
Returns true if Val is a tree of AND/OR/SETCC operations.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, unsigned Align=1, bool *Fast=nullptr) const override
Returns true if the target allows unaligned memory accesses of the specified type.
const uint32_t * getTLSCallPreservedMask() const
int getSplatIndex() const
Carry-setting nodes for multiple precision addition and subtraction.
const TargetMachine & getTarget() const
static bool isINSMask(ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT) const override
Return the preferred vector type legalization action.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
static CondCode getInvertedCondCode(CondCode Code)
void setVarArgsStackIndex(int Index)
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
static bool isZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v...
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
This class represents a function call, abstracting a target machine's calling convention.
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
std::pair< EVT, EVT > GetSplitDestVTs(const EVT &VT) const
Compute the VTs needed for the low/hi parts of a type which is split (or expanded) into two not neces...
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
const GlobalValue * getGlobal() const
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
LLVM_NODISCARD bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
static MVT getFloatingPointVT(unsigned BitWidth)
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
Return the cost of the scaling factor used in the addressing mode represented by AM for this target...
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Function Alias Analysis Results
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
unsigned getSizeInBits() const
This instruction constructs a fixed permutation of two input vectors.
static SDValue performSelectCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with the compare-mask instruct...
void setVarArgsFPRIndex(int Index)
unsigned getByValSize() const
static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N, SelectionDAG &DAG)
unsigned getNumOperands() const
Return the number of values used by this operation.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const Function * getParent() const
Return the enclosing method, or null if none.
unsigned getNumOperands() const
unsigned getValueSizeInBits() const
Returns the size of the value in bits.
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG)
const SDValue & getOperand(unsigned Num) const
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address...
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
static SDValue performExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue NormalizeBuildVector(SDValue Op, SelectionDAG &DAG)
uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew=0)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
An instruction for reading from memory.
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, const SDLoc &dl, SelectionDAG &DAG)
static unsigned getDUPLANEOp(EVT EltType)
static IntegerType * getInt64Ty(LLVMContext &C)
static bool isAdvSIMDModImmType3(uint64_t Imm)
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
an instruction that atomically reads a memory location, combines it with another value, and then stores the result back.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG)
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &ArgsFlags, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
This defines the Use class.
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
Type * getElementType() const
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS, AArch64CC::CondCode CC, bool NoNans, EVT VT, const SDLoc &dl, SelectionDAG &DAG)
bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const
Return true if the attribute exists at the given index.
static SDValue tryCombineFixedPointConvert(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
const SDValue & getBasePtr() const
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a ldN intrinsic.
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1 at the ...
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
void setVarArgsFPRSize(unsigned Size)
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
static std::pair< SDValue, SDValue > getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
unsigned getResNo() const
get the index which selects a specific result in the SDNode
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
bool isUndef() const
Return true if the type of the node type undefined.
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
bool optForMinSize() const
Optimize this function for minimum size (-Oz).
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
return AArch64::GPR64RegClass contains(Reg)
SDValue getExternalSymbol(const char *Sym, EVT VT)
bool isAllOnesValue() const
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool isOSWindows() const
Tests whether the OS is Windows.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
const Triple & getTargetTriple() const
MO_G2 - A symbol operand with this flag (granule 2) represents the bits 32-47 of a 64-bit address...
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
bool isVector() const
isVector - Return true if this is a vector value type.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
static bool isAdvSIMDModImmType7(uint64_t Imm)
static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG)
The address of a basic block.
static bool isTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
cl::opt< bool > EnableAArch64ELFLocalDynamicTLSGeneration("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))
A description of a memory reference used in the backend.
static SDValue performTBZCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue performCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex)
struct fuzzer::@269 Flags
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
const HexagonInstrInfo * TII
static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG)
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
unsigned getMaximumJumpTableSize() const
Shift and rotation operations.
static SDValue performAddSubLongCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Class to represent struct types.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
A Use represents the edge between a Value definition and its users.
static SDValue performAcrossLaneAddReductionCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
Target-specific DAG combine for the across vector add reduction.
static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
Fold a floating-point multiply by power of two into floating-point to fixed-point conversion...
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
bool hasInternalLinkage() const
MachineFunction & getMachineFunction() const
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
static void advance(T &it, size_t Val)
static uint8_t encodeAdvSIMDModImmType6(uint64_t Imm)
unsigned getNumArgOperands() const
Return the number of call arguments.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
CopyToReg - This node has three operands: a chain, a register number to set to this value...
static Constant * get(ArrayRef< Constant * > V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ATTRIBUTE_ALWAYS_INLINE R Default(const T &Value) const
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
Return the ISD::SETCC ValueType.
bool Eq(const uint8_t *Data, size_t Size, const char *Str)
Helper structure to keep track of a SET_CC lowered into AArch64 code.
Reg
All possible values of the reg field in the ModR/M byte.
This file contains the simple types necessary to represent the attributes associated with functions a...
MVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The memory access is dereferenceable (i.e., doesn't trap).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N, SelectionDAG &DAG)
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
This class is used to represent EVT's, which are used to parameterize some operations.
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
Turn vector tests of the signbit in the form of: xor (sra X, elt_size(X)-1), -1 into: cmge X...
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Type * getVectorElementType() const
static bool isAdvSIMDModImmType5(uint64_t Imm)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
This file implements a class to represent arbitrary precision integral constant values and operations...
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_NODISCARD bool empty() const
AtomicOrdering
Atomic ordering for LLVM's memory model.
STACKSAVE - STACKSAVE has one operand, an input chain.
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
Value * getSafeStackPointerLocation(IRBuilder<> &IRB) const override
If the target has a standard location for the unsafe stack pointer, returns the address of that locat...
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
virtual Value * getIRStackGuard(IRBuilder<> &IRB) const
If the target has a standard location for the stack protector guard, returns the address of that loca...
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
static const unsigned PerfectShuffleTable[6561+1]
bool isInConsecutiveRegs() const
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
unsigned getLocReg() const
void setArgumentStackToRestore(unsigned bytes)
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
static SDValue performConcatVectorsCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
static bool isEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseEXT, unsigned &Imm)
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
virtual Value * getSafeStackPointerLocation(IRBuilder<> &IRB) const
Returns the target-specific address of the unsafe stack pointer.
SDValue getRegisterMask(const uint32_t *RegMask)
constexpr bool isMask_64(uint64_t Value)
isMask_64 - This function returns true if the argument is a non-empty sequence of ones starting at th...
const AArch64RegisterInfo * getRegisterInfo() const override
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Simple integer binary arithmetic operators.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
bool isFloatingPointTy() const
Return true if this is one of the six floating-point types.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
const SDValue & getBasePtr() const
static bool isAdvSIMDModImmType2(uint64_t Imm)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
An instruction for storing to memory.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
const APInt & getAPIntValue() const
static Error getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
Return true if the addressing mode represented by AM is legal for this target, for a load/store of th...
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool isArrayTy() const
True if this is an instance of ArrayType.
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
EVT getMemoryVT() const
Return the type of the in-memory value.
CodeGenOpt::Level getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
Returns true if the target can instruction select the specified FP immediate natively.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass...
bool isIntDivCheap(EVT VT, AttributeSet Attr) const override
Return true if integer divide is usually cheaper than a sequence of several shifts, adds, and multiplies for this target.
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE size_t size() const
size - Get the string size.
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static const MCPhysReg GPRArgRegs[]
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
size_t size() const
size - Get the array size.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
void initializeSplitCSR(MachineBasicBlock *Entry) const override
Perform necessary initialization to handle a subset of CSRs explicitly via copies.
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG)
WidenVector - Given a value in the V64 register class, produce the equivalent value in the V128 regis...
Maximum length of the test input libFuzzer tries to guess a good value based on the corpus and reports it always prefer smaller inputs during the corpus shuffle When libFuzzer itself reports a bug this exit code will be used If indicates the maximal total time in seconds to run the fuzzer minimizes the provided crash input Use with etc Experimental Use value profile to guide fuzzing Number of simultaneous worker processes to run the jobs If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset)
Stack pointer relative access.
static bool isREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize...
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG)
Class to represent pointers.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
static bool isEssentiallyExtractSubvector(SDValue N)
This class is used to represent ISD::STORE nodes.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
static SDValue performBRCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
TargetInstrInfo - Interface to description of machine instruction set.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
static bool isAdvSIMDModImmType9(uint64_t Imm)
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL) const
Soften the operands of a comparison.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
SDNode * getNode() const
get the SDNode which holds the desired result
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
MinAlign - A and B are either alignments or offsets.
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
unsigned getScalarSizeInBits() const
unsigned getStoreSize() const
getStoreSize - Return the number of bytes overwritten by a store of the specified value type...
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
A switch()-like statement whose cases are string literals.
Type * getParamType(unsigned i) const
Parameter type accessors.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
initializer< Ty > init(const Ty &Val)
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
void addLiveIn(MCPhysReg PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns true if the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass into a ...
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
Control flow instructions. These all have token chains.
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address...
unsigned const MachineRegisterInfo * MRI
bool isZero() const
Return true if the value is positive or negative zero.
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned getVectorNumElements() const
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
CodeModel::Model getCodeModel() const
Returns the code model.
MVT - Machine Value Type.
LLVM Basic Block Representation.
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
C - The default llvm calling convention, compatible with C.
bool isVectorTy() const
True if this is an instance of VectorType.
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
This is an important base class in LLVM.
void incNumLocalDynamicTLSAccesses()
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
bool isVector() const
isVector - Return true if this is a vector value type.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
int64_t getSExtValue() const
Get sign extended value.
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
const Constant * getConstVal() const
This file contains the declarations for the subclasses of Constant, which represent the different fla...
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
const MachineOperand & getOperand(unsigned i) const
static const MVT MVT_CC
Value type used for condition codes.
bool isBeforeLegalizeOps() const
Carry-using nodes for multiple precision addition and subtraction.
static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
bool isTargetMachO() const
static SDValue performAcrossLaneMinMaxReductionCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
Target-specific DAG combine for the across vector min/max reductions.
bool isLittleEndian() const
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isSetCCOrZExtSetCC(const SDValue &Op, SetCCInfoAndKind &Info)
static EVT getExtensionTo64Bits(const EVT &OrigVT)
static std::pair< SDValue, SDValue > splitInt128(SDValue N, SelectionDAG &DAG)
static bool isAdvSIMDModImmType1(uint64_t Imm)
static mvt_range fp_valuetypes()
static void ReplaceReductionResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, unsigned InterOp, unsigned AcrossOp)
MachineBasicBlock * EmitF128CSEL(MachineInstr &MI, MachineBasicBlock *BB) const
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool isDesirableToCommuteWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
constexpr bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two greater than 0 (64 bit edition...
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This class provides iterator support for SDUse operands that use a specific SDNode.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
static SDValue emitConjunctionDisjunctionTree(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC)
Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops...
unsigned getBytesInStackArgArea() const
bool isBeforeLegalize() const
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
unsigned getBitWidth() const
Return the number of bits in the APInt.
static void ReplaceCMP_SWAP_128Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static bool mayTailCallThisCC(CallingConv::ID CC)
Return true if we might ever do TCO for calls with this calling convention.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
unsigned getOpcode() const
TRAP - Trapping instruction.
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
Value * getOperand(unsigned i) const
Value * getPointerOperand()
static bool isAdvSIMDModImmType8(uint64_t Imm)
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
void setPrefFunctionAlignment(unsigned Align)
Set the target's preferred function alignment.
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
static mvt_range vector_valuetypes()
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
self_iterator getIterator()
The memory access is non-temporal.
int getVarArgsGPRIndex() const
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
void setVarArgsGPRIndex(int Index)
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
static void changeFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
bool empty() const
empty - Check if the array is empty.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
bool isMisaligned128StoreSlow() const
const SDValue & getValue() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
Helper structure to be able to read SetCC information.
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo...
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
static SDValue emitConjunctionDisjunctionTreeRec(SelectionDAG &DAG, SDValue Val, AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp, AArch64CC::CondCode Predicate)
Emit conjunction or disjunction tree with the CMP/FCMP followed by a chain of CCMP/CFCMP ops...
MO_HI12 - This flag indicates that a symbol operand represents the bits 13-24 of a 64-bit address...
EVT - Extended Value Type.
bool isIntN(unsigned N, int64_t x)
isIntN - Checks if a signed integer fits into the given (dynamic) bit width.
bool isPointerTy() const
True if this is an instance of PointerType.
std::vector< ArgListEntry > ArgListTy
static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
static bool isSingletonEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
LLVMContext & getContext() const
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
This structure contains all information that is necessary for lowering calls.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static SDValue GenerateTBL(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
bool predictableSelectIsExpensive() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, bool isSigned, const SDLoc &dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
This class contains a discriminated union of information about pointers in memory operands...
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
unsigned char ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const
ClassifyGlobalReference - Find the target operand flags that describe how a global value should be re...
static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static uint8_t encodeAdvSIMDModImmType12(uint64_t Imm)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
static SDValue performSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG)
NarrowVector - Given a value in the V128 register class, produce the equivalent value in the V64 regi...
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, unsigned Alignment=0, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
Triple - Helper class for working with autoconf configuration names.
static bool isReleaseOrStronger(AtomicOrdering ao)
const MachinePointerInfo & getPointerInfo() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
unsigned getByValAlign() const
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, bool &FromHi)
An EXTR instruction is made up of two shifts, ORed together.
static const int BlockSize
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
static cl::opt< bool > EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden, cl::desc("Allow AArch64 SLI/SRI formation"), cl::init(false))
bool bitsGT(EVT VT) const
bitsGT - Return true if this has more bits than VT.
static uint8_t encodeAdvSIMDModImmType2(uint64_t Imm)
ArrayRef< int > getMask() const
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
bool hasExternalWeakLinkage() const
TokenFactor - This node takes multiple tokens as input and produces a single token result...
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
bool isBuildVectorAllOnes(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are ~0 or undef...
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
Iterator for intrusive lists based on ilist_node.
CCState - This class holds information needed while lowering arguments and return values...
bool MaskAndBranchFoldingIsLegal
MaskAndBranchFoldingIsLegal - Indicates if the target supports folding a mask of a single bit...
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
static unsigned getIntrinsicID(const SDNode *N)
void dump() const
Dump this node, for debugging.
static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St, SDValue SplatVal, unsigned NumVecElts)
auto find(R &&Range, const T &Val) -> decltype(std::begin(Range))
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly...
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side...
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
unsigned logBase2() const
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Module.h This file contains the declarations for the Module class.
Type * getType() const
All values are typed, get the type of this value.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
Provides information about what library functions are available for the current target.
static bool isLegalArithImmed(uint64_t C)
CCValAssign - Represent assignment of one arg/retval to a location.
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
static unsigned getExtFactor(SDValue &V)
getExtFactor - Determine the adjustment factor for the position when generating an "extract from vect...
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
BRCOND - Conditional branch.
An SDNode that represents everything that will be needed to construct a MachineInstr.
static bool isAllConstantBuildVector(const SDValue &PotentialBVec, uint64_t &ConstVal)
const SDValue & getChain() const
Byte Swap and Counting operators.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
BasicBlock * GetInsertBlock() const
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG)
void dump(const TargetInstrInfo *TII=nullptr) const
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Helper structure to keep track of ISD::SET_CC operands.
bool is64BitVector() const
is64BitVector - Return true if this is a 64-bit vector type.
Represents one node in the SelectionDAG.
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
void setAdjustsStack(bool V)
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
static GCRegistry::Add< ShadowStackGC > C("shadow-stack","Very portable GC for uncooperative code generators")
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a stN intrinsic.
static void changeFPCCToANDAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
Convert a DAG fp condition code to an AArch64 CC.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
Fold a floating-point divide by power of two into fixed-point to floating-point conversion.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
AttributeSet getAttributes() const
Return the attribute list for this Function.
static bool isZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, const SDLoc &DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
bool is128BitVector() const
is128BitVector - Return true if this is a 128-bit vector type.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
Class to represent vector types.
void insertCopiesSplitCSR(MachineBasicBlock *Entry, const SmallVectorImpl< MachineBasicBlock * > &Exits) const override
Insert explicit copies in entry and exit blocks.
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
static unsigned getNZCVToSatisfyCondCode(CondCode Code)
Given a condition code, return NZCV flags that would satisfy that condition.
static bool performTBISimplification(SDValue Addr, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Simplify an address Addr given that the top byte of it is ignored by HW during address translation.
Class for arbitrary precision integers.
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
Value * getIRStackGuard(IRBuilder<> &IRB) const override
If the target has a standard location for the stack protector cookie, returns the address of that loc...
iterator_range< use_iterator > uses()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector...
op_iterator op_begin() const
bool isIntegerTy() const
True if this is an instance of IntegerType.
unsigned EmulatedTLS
EmulatedTLS - This flag enables emulated TLS model, using emutls function in the runtime library...
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static use_iterator use_end()
bool isNullFPConstant(SDValue V)
Returns true if V is an FP constant with a value of positive zero.
void setPrefLoopAlignment(unsigned Align)
Set the target's preferred loop alignment.
void setMaximumJumpTableSize(unsigned)
Indicate the maximum number of entries in jump tables.
ZERO_EXTEND - Used for integer types, zeroing the new bits.
unsigned getVectorNumElements() const
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
ANY_EXTEND - Used for integer types. The high bits are undefined.
static SDValue performNVCASTCombine(SDNode *N)
Get rid of unnecessary NVCASTs (that don't change the type).
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
FMINNAN/FMAXNAN - Behave identically to FMINNUM/FMAXNUM, except that when a single input is NaN...
Value * CreatePointerCast(Value *V, Type *DestTy, const Twine &Name="")
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT) const
Return the preferred vector type legalization action.
bool isTargetDarwin() const
static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG)
iterator_range< value_op_iterator > op_values() const
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
static SDValue performPostLD1Combine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, bool IsLaneOp)
Target-specific DAG combine function for post-increment LD1 (lane) and post-increment LD1R...
static bool isAcquireOrStronger(AtomicOrdering ao)
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
static uint8_t encodeAdvSIMDModImmType5(uint64_t Imm)
static Constant * getSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumElts)
Get a mask consisting of sequential integers starting from Start.
static bool isEquivalentMaskless(unsigned CC, unsigned width, ISD::LoadExtType ExtType, int AddConstant, int CompConstant)
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
BR_JT - Jumptable branch.
static mvt_range all_valuetypes()
SimpleValueType Iteration.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType8(uint64_t Imm)
These are IR-level optimization flags that may be propagated to SDNodes.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
LLVM_ATTRIBUTE_ALWAYS_INLINE iterator end()
uint64_t getConstantOperandVal(unsigned i) const
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG)
Bitwise operators - logical and, logical or, logical xor.
pointer data()
Return a pointer to the vector's buffer, even if empty().
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
This method returns a target specific FastISel object, or null if the target does not support "fast" ...
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
static uint8_t encodeAdvSIMDModImmType4(uint64_t Imm)
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
static uint8_t encodeAdvSIMDModImmType11(uint64_t Imm)
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
int getVarArgsFPRIndex() const
void ReplaceAllUsesWith(SDValue From, SDValue Op)
Modify anything using 'From' to use 'To' instead.
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
ArrayRef< SDUse > ops() const
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
LLVM_ATTRIBUTE_ALWAYS_INLINE size_type size() const
FunctionType * getFunctionType() const
Returns the FunctionType for me.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
APFloat abs(APFloat X)
Returns the absolute value of the argument.
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page...
The memory access always returns the same value (or traps).
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="", MDNode *FPMathTag=nullptr)
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OptSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert, SelectionDAG &DAG)
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
op_iterator op_end() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
unsigned getVarArgsGPRSize() const
LLVM_NODISCARD std::enable_if<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
static SDValue performIntrinsicCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
VectorType * getType() const
Overload to return most specific vector type.
static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St)
Replace a splat of a scalar to a vector store by scalar stores of the scalar value.
bool supportsAddressTopByteIgnored() const
CPU has TBI (top byte of addresses is ignored during HW address translation) and OS enables it...
FSINCOS - Compute both fsin and fcos as a single operation.
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements, uint32_t BitWidth) const
If this is a constant FP splat and the splatted constant FP is an exact power or 2, return the log base 2 integer value.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, const AllocaInst *Alloca=nullptr)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
EVT getValueType() const
Return the ValueType of the referenced return value.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
void setBytesInStackArgArea(unsigned bytes)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
void setUnsafeAlgebra(bool b)
bool is128BitVector() const
is128BitVector - Return true if this is a 128-bit vector type.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St)
Replace a splat of zeros to a vector store by scalar stores of WZR/XZR.
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
unsigned getReg() const
getReg - Returns the register number.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool EnableExtLdPromotion
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static SDValue tryMatchAcrossLaneShuffleForReduction(SDNode *N, SDValue OpV, unsigned Op, SelectionDAG &DAG)
This function handles the log2-shuffle pattern produced by the across-lane vector reduction expansion.
void insert(iterator MBBI, MachineBasicBlock *MBB)
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC)
changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 CC
static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo)
Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one...
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
unsigned getAlignment() const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Module * getParent()
Get the module that this global value is contained inside of...
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
unsigned getMaximumJumpTableSize() const
Return upper limit for number of entries in a jump table.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
const AArch64InstrInfo * getInstrInfo() const override
static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static VectorType * get(Type *ElementType, unsigned NumElements)
This static method is the primary way to construct an VectorType.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
SDValue getValueType(EVT)
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
PREFETCH - This corresponds to a prefetch intrinsic.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
unsigned getPrefFunctionAlignment() const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
const TargetLowering & getTargetLoweringInfo() const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
Primary interface to the complete machine description for the target machine.
PointerType * getPointerTo(unsigned AddrSpace=0) const
Return a pointer to the current type.
StringRef - Represent a constant reference to a string, i.e.
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow...
SetCC operator - This evaluates to a true value iff the condition is true.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
unsigned getPrefLoopAlignment() const
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
static uint8_t encodeAdvSIMDModImmType9(uint64_t Imm)
unsigned MaxStoresPerMemsetOptSize
Maximum number of stores operations that may be substituted for the call to memset, used for functions with OptSize attribute.
static bool isConcatMask(ArrayRef< int > Mask, EVT VT, bool SplitLHS)
static bool isAdvSIMDModImmType11(uint64_t Imm)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml","ocaml 3.10-compatible collector")
bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
bool operator==(uint64_t V1, const APInt &V2)
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getLocMemOffset() const
MVT getVectorElementType() const
static bool isVolatile(Instruction *Inst)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
Provide custom lowering hooks for some operations.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
unsigned getNumUses() const
This method computes the number of uses of this Value.
SDValue getTargetInsertSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
TRUNCATE - Completely drop the high bits.
static void Split(std::vector< std::string > &V, StringRef S)
Split - Splits a string of comma separated items in to a vector of strings.
auto find_if(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue CCOp, AArch64CC::CondCode Predicate, AArch64CC::CondCode OutCC, const SDLoc &DL, SelectionDAG &DAG)
Create a conditional comparison; Use CCMP, CCMN or FCCMP as appropriate.
bool isUIntN(unsigned N, uint64_t x)
isUIntN - Checks if an unsigned integer fits into the given (dynamic) bit width.
unsigned getAlignment() const
static void getShuffleMask(Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isShuffleMaskLegal(const SmallVectorImpl< int > &M, EVT VT) const override
Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded...
bool is64BitVector() const
is64BitVector - Return true if this is a 64-bit vector type.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
static uint8_t encodeAdvSIMDModImmType7(uint64_t Imm)
Value * getPointerOperand()
static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode, SDValue Operand, SelectionDAG &DAG, int &ExtraSteps)
Fast - This calling convention attempts to make calls as fast as possible (e.g.
MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num)
allocateMemRefsArray - Allocate an array to hold MachineMemOperand pointers.
static SDValue performSRLCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
unsigned Log2_64(uint64_t Value)
Log2_64 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
static bool isSplatMask(const int *Mask, EVT VT)
static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, APInt &UndefBits)
EVT changeVectorElementTypeToInteger() const
changeVectorElementTypeToInteger - Return a vector with the same number of elements as this vector...
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
static bool canGuaranteeTCO(CallingConv::ID CC)
Return true if the calling convention is one that we can guarantee TCO for.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
LLVMContext & getContext() const
Get the global data context.
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO)
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
Return true if an FMA operation is faster than a pair of fmul and fadd instructions.
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG)
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
gep_type_iterator gep_type_begin(const User *GEP)
uint64_t getZExtValue() const
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
void setIsSplitCSR(bool s)
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AArch64cc, SelectionDAG &DAG, const SDLoc &dl)