38 #define DEBUG_TYPE "aarch64-lower"
40 STATISTIC(NumTailCalls,
"Number of tail calls");
41 STATISTIC(NumShiftInserts,
"Number of vector shift inserts");
54 clEnumValN(StrictAlign,
"aarch64-strict-align",
55 "Disallow all unaligned memory accesses"),
56 clEnumValN(NoStrictAlign,
"aarch64-no-strict-align",
57 "Allow unaligned memory accesses"),
63 cl::desc(
"Allow AArch64 (or (shift)(shift))->extract"),
68 cl::desc(
"Allow AArch64 SLI/SRI formation"),
76 cl::desc(
"Allow AArch64 Local Dynamic TLS code generation"),
515 RequireStrictAlign = (
Align == StrictAlign);
623 void AArch64TargetLowering::addTypeForNEON(
EVT VT,
EVT PromotedBitwiseVT) {
698 void AArch64TargetLowering::addDRTypeForNEON(
MVT VT) {
703 void AArch64TargetLowering::addQRTypeForNEON(
MVT VT) {
725 APInt KnownZero2, KnownOne2;
728 KnownZero &= KnownZero2;
729 KnownOne &= KnownOne2;
737 case Intrinsic::aarch64_ldaxr:
738 case Intrinsic::aarch64_ldxr: {
740 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
750 unsigned IntNo = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
754 case Intrinsic::aarch64_neon_umaxv:
755 case Intrinsic::aarch64_neon_uminv: {
763 assert(BitWidth >= 8 &&
"Unexpected width!");
767 assert(BitWidth >= 16 &&
"Unexpected width!");
954 BuildMI(MBB, DL, TII->
get(AArch64::B)).addMBB(EndBB);
986 case AArch64::F128CSEL:
1128 return (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
1145 cast<ConstantSDNode>(RHS.
getOperand(0))->getZExtValue() == 0 &&
1160 cast<ConstantSDNode>(RHS)->getZExtValue() == 0 &&
1180 uint64_t C = RHSC->getZExtValue();
1188 if ((VT ==
MVT::i32 && C != 0x80000000 &&
1190 (VT ==
MVT::i64 && C != 0x80000000ULL &&
1193 C = (VT ==
MVT::i32) ? (uint32_t)(C - 1) : C - 1;
1203 C = (VT ==
MVT::i32) ? (uint32_t)(C - 1) : C - 1;
1209 if ((VT ==
MVT::i32 && C != INT32_MAX &&
1211 (VT ==
MVT::i64 && C != INT64_MAX &&
1214 C = (VT ==
MVT::i32) ? (uint32_t)(C + 1) : C + 1;
1220 if ((VT ==
MVT::i32 && C != UINT32_MAX &&
1222 (VT ==
MVT::i64 && C != UINT64_MAX &&
1225 C = (VT ==
MVT::i32) ? (uint32_t)(C + 1) : C + 1;
1249 if ((cast<ConstantSDNode>(RHS)->getZExtValue() >> 16 == 0) &&
1250 isa<LoadSDNode>(LHS)) {
1251 if (cast<LoadSDNode>(LHS)->getExtensionType() ==
ISD::ZEXTLOAD &&
1252 cast<LoadSDNode>(LHS)->getMemoryVT() ==
MVT::i16 &&
1254 int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
1276 static std::pair<SDValue, SDValue>
1279 "Unsupported value type");
1353 UpperBits).getValue(1);
1375 UpperBits).getValue(1);
1385 Value = DAG.
getNode(Opc, DL, VTs, LHS, RHS);
1388 return std::make_pair(Value, Overflow);
1430 if (!CFVal || !CTVal)
1435 if (CTVal->isAllOnesValue() && CFVal->
isNullValue()) {
1467 bool ExtraOp =
false;
1526 unsigned IsWrite = cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue();
1527 unsigned Locality = cast<ConstantSDNode>(Op.
getOperand(3))->getZExtValue();
1528 unsigned IsData = cast<ConstantSDNode>(Op.
getOperand(4))->getZExtValue();
1530 bool IsStream = !Locality;
1534 assert(Locality <= 3 &&
"Prefetch locality out-of-range");
1538 Locality = 3 - Locality;
1542 unsigned PrfOp = (IsWrite << 4) |
1557 return LowerF128Call(Op, DAG, LC);
1575 false,
SDLoc(Op)).first;
1656 In = DAG.
getNode(CastOpc, dl, CastVT, In);
1692 return LowerF128Call(Op, DAG, LC);
1709 Entry.isSExt =
false;
1710 Entry.isZExt =
false;
1711 Args.push_back(Entry);
1713 const char *LibcallName =
1714 (ArgVT ==
MVT::f64) ?
"__sincos_stret" :
"__sincosf_stret";
1723 std::pair<SDValue, SDValue> CallResult =
LowerCallTo(CLI);
1724 return CallResult.first;
1746 assert(OrigVT.
isSimple() &&
"Expecting a simple value type");
1749 switch (OrigSimpleTy) {
1762 unsigned ExtOpcode) {
1786 unsigned HalfSize = EltSize / 2;
1788 if (!
isIntN(HalfSize, C->getSExtValue()))
1791 if (!
isUIntN(HalfSize, C->getZExtValue()))
1812 unsigned EltSize = VT.getVectorElementType().getSizeInBits() / 2;
1813 unsigned NumElts = VT.getVectorNumElements();
1816 for (
unsigned i = 0; i != NumElts; ++i) {
1870 "unexpected type for custom-lowering ISD::MUL");
1873 unsigned NewOpc = 0;
1877 if (isN0SExt && isN1SExt)
1882 if (isN0ZExt && isN1ZExt)
1884 else if (isN1SExt || isN1ZExt) {
1918 "unexpected types for extended operands to VMULL");
1919 return DAG.
getNode(NewOpc, DL, VT, Op0, Op1);
1927 return DAG.
getNode(N0->getOpcode(), DL, VT,
1943 return LowerGlobalAddress(Op, DAG);
1945 return LowerGlobalTLSAddress(Op, DAG);
1947 return LowerSETCC(Op, DAG);
1949 return LowerBR_CC(Op, DAG);
1951 return LowerSELECT(Op, DAG);
1953 return LowerSELECT_CC(Op, DAG);
1955 return LowerJumpTable(Op, DAG);
1957 return LowerConstantPool(Op, DAG);
1959 return LowerBlockAddress(Op, DAG);
1961 return LowerVASTART(Op, DAG);
1963 return LowerVACOPY(Op, DAG);
1965 return LowerVAARG(Op, DAG);
1987 return LowerFP_ROUND(Op, DAG);
1989 return LowerFP_EXTEND(Op, DAG);
1991 return LowerFRAMEADDR(Op, DAG);
1993 return LowerRETURNADDR(Op, DAG);
1995 return LowerINSERT_VECTOR_ELT(Op, DAG);
1997 return LowerEXTRACT_VECTOR_ELT(Op, DAG);
1999 return LowerBUILD_VECTOR(Op, DAG);
2001 return LowerVECTOR_SHUFFLE(Op, DAG);
2003 return LowerEXTRACT_SUBVECTOR(Op, DAG);
2007 return LowerVectorSRA_SRL_SHL(Op, DAG);
2009 return LowerShiftLeftParts(Op, DAG);
2012 return LowerShiftRightParts(Op, DAG);
2014 return LowerCTPOP(Op, DAG);
2016 return LowerFCOPYSIGN(Op, DAG);
2018 return LowerVectorAND(Op, DAG);
2020 return LowerVectorOR(Op, DAG);
2027 return LowerINT_TO_FP(Op, DAG);
2030 return LowerFP_TO_INT(Op, DAG);
2032 return LowerFSINCOS(Op, DAG);
2047 #include "AArch64GenCallingConv.inc"
2051 bool IsVarArg)
const {
2056 return CC_AArch64_WebKit_JS;
2058 return CC_AArch64_GHC;
2062 return CC_AArch64_AAPCS;
2063 return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
2067 SDValue AArch64TargetLowering::LowerFormalArguments(
2085 unsigned NumArgs = Ins.
size();
2087 unsigned CurArgIdx = 0;
2088 for (
unsigned i = 0; i != NumArgs; ++i) {
2089 MVT ValVT = Ins[i].VT;
2090 if (Ins[i].isOrigArg()) {
2091 std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
2092 CurArgIdx = Ins[i].getOrigArgIndex();
2107 assert(!Res &&
"Call operand has unhandled type");
2110 assert(ArgLocs.
size() == Ins.
size());
2112 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i) {
2115 if (Ins[i].
Flags.isByVal()) {
2119 int Size = Ins[i].Flags.getByValSize();
2120 unsigned NumRegs = (Size + 7) / 8;
2140 RC = &AArch64::GPR32RegClass;
2142 RC = &AArch64::GPR64RegClass;
2144 RC = &AArch64::FPR16RegClass;
2145 else if (RegVT == MVT::f32)
2146 RC = &AArch64::FPR32RegClass;
2148 RC = &AArch64::FPR64RegClass;
2150 RC = &AArch64::FPR128RegClass;
2174 assert(RegVT == Ins[i].VT &&
"incorrect register location selected");
2181 assert(VA.
isMemLoc() &&
"CCValAssign is neither reg nor mem");
2185 uint32_t BEAlign = 0;
2187 !Ins[i].Flags.isInConsecutiveRegs())
2188 BEAlign = 8 - ArgSize;
2219 MemVT,
false,
false,
false, 0);
2231 saveVarArgRegisters(CCInfo, DAG, DL, Chain);
2236 unsigned StackOffset = CCInfo.getNextStackOffset();
2238 StackOffset = ((StackOffset + 7) & ~7);
2243 unsigned StackArgSize = CCInfo.getNextStackOffset();
2245 if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
2266 void AArch64TargetLowering::saveVarArgRegisters(
CCState &CCInfo,
2277 AArch64::X3, AArch64::X4, AArch64::X5,
2278 AArch64::X6, AArch64::X7 };
2279 static const unsigned NumGPRArgRegs =
array_lengthof(GPRArgRegs);
2282 unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
2284 if (GPRSaveSize != 0) {
2289 for (
unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
2290 unsigned VReg = MF.
addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
2295 MemOps.push_back(Store);
2305 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
2306 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
2307 static const unsigned NumFPRArgRegs =
array_lengthof(FPRArgRegs);
2310 unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
2312 if (FPRSaveSize != 0) {
2317 for (
unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
2318 unsigned VReg = MF.
addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
2324 MemOps.push_back(Store);
2333 if (!MemOps.empty()) {
2340 SDValue AArch64TargetLowering::LowerCallResult(
2346 ? RetCC_AArch64_WebKit_JS
2347 : RetCC_AArch64_AAPCS;
2355 for (
unsigned i = 0; i != RVLocs.
size(); ++i) {
2360 if (i == 0 && isThisReturn) {
2362 "unexpected return calling convention register assignment");
2388 bool AArch64TargetLowering::isEligibleForTailCallOptimization(
2390 bool isCalleeStructRet,
bool isCallerStructRet,
2397 if (!IsTailCallConvention(CalleeCC) && CalleeCC !=
CallingConv::C)
2403 bool CCMatch = CallerCC == CalleeCC;
2411 if (i->hasByValAttr())
2415 if (IsTailCallConvention(CalleeCC) && CCMatch)
2442 "Unexpected variadic calling convention");
2444 if (isVarArg && !Outs.
empty()) {
2456 for (
unsigned i = 0, e = ArgLocs.
size(); i != e; ++i)
2457 if (!ArgLocs[i].isRegLoc())
2474 if (RVLocs1.
size() != RVLocs2.
size())
2476 for (
unsigned i = 0, e = RVLocs1.
size(); i != e; ++i) {
2477 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
2479 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
2481 if (RVLocs1[i].isRegLoc()) {
2482 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
2485 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
2508 SDValue AArch64TargetLowering::addTokenForArgument(
SDValue Chain,
2511 int ClobberedFI)
const {
2514 int64_t LastByte = FirstByte + MFI->
getObjectSize(ClobberedFI) - 1;
2525 if (
LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
2527 if (FI->getIndex() < 0) {
2529 int64_t InLastByte = InFirstByte;
2532 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
2533 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
2541 bool AArch64TargetLowering::DoesCalleeRestoreStack(
CallingConv::ID CallCC,
2542 bool TailCallOpt)
const {
2546 bool AArch64TargetLowering::IsTailCallConvention(
CallingConv::ID CallCC)
const {
2553 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
2562 bool &IsTailCall = CLI.IsTailCall;
2564 bool IsVarArg = CLI.IsVarArg;
2567 bool IsStructRet = (Outs.
empty()) ?
false : Outs[0].
Flags.isSRet();
2568 bool IsThisReturn =
false;
2572 bool IsSibCall =
false;
2576 IsTailCall = isEligibleForTailCallOptimization(
2577 Callee, CallConv, IsVarArg, IsStructRet,
2579 if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
2581 "site marked musttail");
2585 if (!TailCallOpt && IsTailCall)
2600 unsigned NumArgs = Outs.
size();
2602 for (
unsigned i = 0; i != NumArgs; ++i) {
2603 MVT ArgVT = Outs[i].VT;
2608 assert(!Res &&
"Call operand has unhandled type");
2618 unsigned NumArgs = Outs.
size();
2619 for (
unsigned i = 0; i != NumArgs; ++i) {
2620 MVT ValVT = Outs[i].VT;
2623 CLI.getArgs()[Outs[i].OrigArgIndex].Ty,
2635 assert(!Res &&
"Call operand has unhandled type");
2656 if (IsTailCall && !IsSibCall) {
2666 FPDiff = NumReusableBytes - NumBytes;
2673 assert(FPDiff % 16 == 0 &&
"unaligned stack on tail call");
2691 for (
unsigned i = 0, realArgIdx = 0, e = ArgLocs.
size(); i != e;
2692 ++i, ++realArgIdx) {
2694 SDValue Arg = OutVals[realArgIdx];
2710 if (Outs[realArgIdx].ArgVT ==
MVT::i1) {
2728 "unexpected calling convention register assignment");
2729 assert(!Ins.empty() && Ins[0].VT ==
MVT::i64 &&
2730 "unexpected use of 'returned'");
2731 IsThisReturn =
true;
2742 uint32_t BEAlign = 0;
2745 OpSize = (OpSize + 7) / 8;
2749 BEAlign = 8 - OpSize;
2752 int32_t Offset = LocMemOffset + BEAlign;
2757 Offset = Offset + FPDiff;
2766 Chain = addTokenForArgument(Chain, DAG, MF.
getFrameInfo(), FI);
2774 if (Outs[i].Flags.
isByVal()) {
2778 Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.
getByValAlign(),
2793 DAG.
getStore(Chain, DL, Arg, DstAddr, DstInfo,
false,
false, 0);
2799 if (!MemOpChains.
empty())
2805 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++i) {
2806 Chain = DAG.
getCopyToReg(Chain, DL, RegsToPass[i].first,
2807 RegsToPass[i].second, InFlag);
2819 if (InternalLinkage)
2827 dyn_cast<ExternalSymbolSDNode>(Callee)) {
2828 const char *Sym = S->getSymbol();
2836 const char *Sym = S->getSymbol();
2844 if (IsTailCall && !IsSibCall) {
2850 std::vector<SDValue> Ops;
2851 Ops.push_back(Chain);
2852 Ops.push_back(Callee);
2863 for (
unsigned i = 0, e = RegsToPass.
size(); i != e; ++i)
2864 Ops.push_back(DAG.
getRegister(RegsToPass[i].first,
2865 RegsToPass[i].second.getValueType()));
2868 const uint32_t *Mask;
2874 IsThisReturn =
false;
2880 assert(Mask &&
"Missing call preserved mask for calling convention");
2884 Ops.push_back(InFlag);
2899 uint64_t CalleePopBytes = DoesCalleeRestoreStack(CallConv, TailCallOpt)
2911 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
2912 InVals, IsThisReturn,
2913 IsThisReturn ? OutVals[0] :
SDValue());
2916 bool AArch64TargetLowering::CanLowerReturn(
2920 ? RetCC_AArch64_WebKit_JS
2921 : RetCC_AArch64_AAPCS;
2923 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2934 ? RetCC_AArch64_WebKit_JS
2935 : RetCC_AArch64_AAPCS;
2944 for (
unsigned i = 0, realRVLocIdx = 0; i != RVLocs.
size();
2945 ++i, ++realRVLocIdx) {
2947 assert(VA.
isRegLoc() &&
"Can only return in registers!");
2948 SDValue Arg = OutVals[realRVLocIdx];
2954 if (Outs[i].ArgVT ==
MVT::i1) {
2976 RetOps.push_back(Flag);
2991 unsigned char OpFlags =
2994 assert(cast<GlobalAddressSDNode>(Op)->
getOffset() == 0 &&
2995 "unexpected offset in global node");
3007 "use of MO_CONSTPOOL only supported on small model");
3074 AArch64TargetLowering::LowerDarwinGlobalTLSAddress(
SDValue Op,
3076 assert(Subtarget->
isTargetDarwin() &&
"TLS only supported on Darwin");
3080 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3091 false,
true,
true, 8);
3100 const uint32_t *Mask =
3150 AArch64TargetLowering::LowerELFGlobalTLSAddress(
SDValue Op,
3152 assert(Subtarget->
isTargetELF() &&
"This function expects an ELF target");
3154 "ELF TLS only supported in small memory model");
3215 TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
3239 TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
3249 return LowerDarwinGlobalTLSAddress(Op, DAG);
3251 return LowerELFGlobalTLSAddress(Op, DAG);
3280 if (LHS.
getResNo() == 1 && isa<ConstantSDNode>(RHS) &&
3281 cast<ConstantSDNode>(RHS)->isOne() &&
3285 "Unexpected condition code.");
3396 if (SrcVT == MVT::f32 && VT ==
MVT::f64)
3398 else if (SrcVT ==
MVT::f64 && VT == MVT::f32)
3414 EltMask = 0x80000000ULL;
3505 return LowerVSETCC(Op, DAG);
3525 "Unexpected setcc expansion!");
3589 if (CCmp && CResult && Cmp.
getValueType() == MVT::f32 &&
3594 return CResult->getValueAPF().bitwiseIsEqual(CmpVal);
3657 }
else if (CTVal && CFVal) {
3658 const int64_t TrueVal = CTVal->getSExtValue();
3665 if (TrueVal == ~FalseVal) {
3667 }
else if (TrueVal == -FalseVal) {
3675 const uint32_t TrueVal32 = CTVal->getZExtValue();
3678 if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
3681 if (TrueVal32 > FalseVal32) {
3686 }
else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
3689 if (TrueVal > FalseVal) {
3712 return DAG.
getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
3747 return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
3782 CC = cast<CondCodeSDNode>(CCVal->getOperand(2))->
get();
3785 RHS = DAG.
getConstant(0, DL, CCVal.getValueType());
3788 return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
3862 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
3891 const Value *SV = cast<SrcValueSDNode>(Op.
getOperand(2))->getValue();
3907 const Value *SV = cast<SrcValueSDNode>(Op.
getOperand(2))->getValue();
3967 return Subtarget->
isTargetDarwin() ? LowerDarwin_VASTART(Op, DAG)
3968 : LowerAAPCS_VASTART(Op, DAG);
3977 const Value *DestSV = cast<SrcValueSDNode>(Op.
getOperand(3))->getValue();
3978 const Value *SrcSV = cast<SrcValueSDNode>(Op.
getOperand(4))->getValue();
3989 "automatic va_arg instruction only works on Darwin");
3991 const Value *V = cast<SrcValueSDNode>(Op.
getOperand(2))->getValue();
4000 false,
false,
false, 0);
4004 assert(((Align & (Align - 1)) == 0) &&
"Expected Align to be a power of 2");
4020 bool NeedFPTrunc =
false;
4057 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4068 unsigned AArch64TargetLowering::getRegisterByName(
const char* RegName,
EVT VT,
4071 .Case(
"sp", AArch64::SP)
4087 unsigned Depth = cast<ConstantSDNode>(Op.
getOperand(0))->getZExtValue();
4089 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4097 unsigned Reg = MF.
addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
4129 SDValue TrueValLo = DAG.
getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4137 ? DAG.
getNode(Opc, dl, VT, ShOpHi,
4154 unsigned VTBits = VT.getSizeInBits();
4203 else if (VT == MVT::f32)
4243 AArch64TargetLowering::getConstraintType(
StringRef Constraint)
const {
4244 if (Constraint.
size() == 1) {
4245 switch (Constraint[0]) {
4266 AArch64TargetLowering::getSingleConstraintMatchWeight(
4267 AsmOperandInfo &
info,
const char *constraint)
const {
4269 Value *CallOperandVal = info.CallOperandVal;
4272 if (!CallOperandVal)
4276 switch (*constraint) {
4292 std::pair<unsigned, const TargetRegisterClass *>
4293 AArch64TargetLowering::getRegForInlineAsmConstraint(
4295 if (Constraint.
size() == 1) {
4296 switch (Constraint[0]) {
4299 return std::make_pair(0U, &AArch64::GPR64commonRegClass);
4300 return std::make_pair(0U, &AArch64::GPR32commonRegClass);
4303 return std::make_pair(0U, &AArch64::FPR32RegClass);
4305 return std::make_pair(0U, &AArch64::FPR64RegClass);
4307 return std::make_pair(0U, &AArch64::FPR128RegClass);
4313 return std::make_pair(0U, &AArch64::FPR128_loRegClass);
4318 return std::make_pair(
unsigned(
AArch64::NZCV), &AArch64::CCRRegClass);
4322 std::pair<unsigned, const TargetRegisterClass *> Res;
4327 unsigned Size = Constraint.
size();
4328 if ((Size == 4 || Size == 5) && Constraint[0] ==
'{' &&
4329 tolower(Constraint[1]) ==
'v' && Constraint[Size - 1] ==
'}') {
4332 if (!Failed && RegNo >= 0 && RegNo <= 31) {
4336 Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
4337 Res.second = &AArch64::FPR128RegClass;
4347 void AArch64TargetLowering::LowerAsmOperandForConstraint(
4348 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
4353 if (Constraint.length() != 1)
4356 char ConstraintLetter = Constraint[0];
4357 switch (ConstraintLetter) {
4388 switch (ConstraintLetter) {
4396 if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
4401 if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
4433 if ((CVal & 0xFFFF) == CVal)
4435 if ((CVal & 0xFFFF0000ULL) == CVal)
4437 uint64_t NCVal = ~(uint32_t)CVal;
4438 if ((NCVal & 0xFFFFULL) == NCVal)
4440 if ((NCVal & 0xFFFF0000ULL) == NCVal)
4447 if ((CVal & 0xFFFFULL) == CVal)
4449 if ((CVal & 0xFFFF0000ULL) == CVal)
4451 if ((CVal & 0xFFFF00000000ULL) == CVal)
4453 if ((CVal & 0xFFFF000000000000ULL) == CVal)
4455 uint64_t NCVal = ~CVal;
4456 if ((NCVal & 0xFFFFULL) == NCVal)
4458 if ((NCVal & 0xFFFF0000ULL) == NCVal)
4460 if ((NCVal & 0xFFFF00000000ULL) == NCVal)
4462 if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
4476 Ops.push_back(Result);
4528 struct ShuffleSourceInfo {
4544 ShuffleSourceInfo(
SDValue Vec)
4545 : Vec(Vec), MinElt(UINT_MAX), MaxElt(0), ShuffleVec(Vec), WindowBase(0),
4552 for (
unsigned i = 0; i < NumElts; ++i) {
4564 auto Source = std::find(Sources.
begin(), Sources.
end(), SourceVec);
4569 unsigned EltNo = cast<ConstantSDNode>(V.
getOperand(1))->getZExtValue();
4576 if (Sources.
size() > 2)
4582 for (
auto &
Source : Sources) {
4583 EVT SrcEltTy =
Source.Vec.getValueType().getVectorElementType();
4584 if (SrcEltTy.
bitsLT(SmallestEltTy)) {
4585 SmallestEltTy = SrcEltTy;
4588 unsigned ResMultiplier =
4596 for (
auto &Src : Sources) {
4597 EVT SrcVT = Src.ShuffleVec.getValueType();
4614 DAG.
getUNDEF(Src.ShuffleVec.getValueType()));
4620 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
4625 if (Src.MinElt >= NumSrcElts) {
4630 Src.WindowBase = -NumSrcElts;
4631 }
else if (Src.MaxElt < NumSrcElts) {
4649 Src.WindowBase = -Src.MinElt;
4656 for (
auto &Src : Sources) {
4658 if (SrcEltTy == SmallestEltTy)
4663 Src.WindowBase *= Src.WindowScale;
4668 for (
auto Src : Sources)
4669 assert(Src.ShuffleVec.getValueType() == ShuffleVT);
4680 auto Src = std::find(Sources.begin(), Sources.end(), Entry.
getOperand(0));
4681 int EltNo = cast<ConstantSDNode>(Entry.
getOperand(1))->getSExtValue();
4689 int LanesDefined = BitsDefined / BitsPerShuffleLane;
4693 int *LaneMask = &Mask[i * ResMultiplier];
4695 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
4696 ExtractBase += NumElts * (Src - Sources.begin());
4697 for (
int j = 0; j < LanesDefined; ++j)
4698 LaneMask[j] = ExtractBase + j;
4706 for (
unsigned i = 0; i < Sources.size(); ++i)
4707 ShuffleOps[i] = Sources[i].ShuffleVec;
4710 ShuffleOps[1], &Mask[0]);
4728 unsigned ExpectedElt = Imm;
4729 for (
unsigned i = 1; i < NumElts; ++i) {
4733 if (ExpectedElt == NumElts)
4738 if (ExpectedElt != static_cast<unsigned>(M[i]))
4750 const int *FirstRealElt = std::find_if(M.
begin(), M.
end(),
4751 [](
int Elt) {
return Elt >= 0;});
4756 APInt ExpectedElt =
APInt(MaskBits, *FirstRealElt + 1);
4759 const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.
end(),
4760 [&](
int Elt) {
return Elt != ExpectedElt++ && Elt != -1;});
4761 if (FirstWrongElt != M.
end())
4790 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
4791 "Only possible block sizes for REV are: 16, 32, 64");
4798 unsigned BlockElts = M[0] + 1;
4801 BlockElts = BlockSize / EltSz;
4803 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
4806 for (
unsigned i = 0; i < NumElts; ++i) {
4809 if ((
unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
4818 WhichResult = (M[0] == 0 ? 0 : 1);
4819 unsigned Idx = WhichResult * NumElts / 2;
4820 for (
unsigned i = 0; i != NumElts; i += 2) {
4821 if ((M[i] >= 0 && (
unsigned)M[i] != Idx) ||
4822 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != Idx + NumElts))
4832 WhichResult = (M[0] == 0 ? 0 : 1);
4833 for (
unsigned i = 0; i != NumElts; ++i) {
4836 if ((
unsigned)M[i] != 2 * i + WhichResult)
4845 WhichResult = (M[0] == 0 ? 0 : 1);
4846 for (
unsigned i = 0; i < NumElts; i += 2) {
4847 if ((M[i] >= 0 && (
unsigned)M[i] != i + WhichResult) ||
4848 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != i + NumElts + WhichResult))
4859 WhichResult = (M[0] == 0 ? 0 : 1);
4860 unsigned Idx = WhichResult * NumElts / 2;
4861 for (
unsigned i = 0; i != NumElts; i += 2) {
4862 if ((M[i] >= 0 && (
unsigned)M[i] != Idx) ||
4863 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != Idx))
4876 WhichResult = (M[0] == 0 ? 0 : 1);
4877 for (
unsigned j = 0; j != 2; ++j) {
4878 unsigned Idx = WhichResult;
4879 for (
unsigned i = 0; i != Half; ++i) {
4880 int MIdx = M[i + j * Half];
4881 if (MIdx >= 0 && (
unsigned)MIdx != Idx)
4895 WhichResult = (M[0] == 0 ? 0 : 1);
4896 for (
unsigned i = 0; i < NumElts; i += 2) {
4897 if ((M[i] >= 0 && (
unsigned)M[i] != i + WhichResult) ||
4898 (M[i + 1] >= 0 && (
unsigned)M[i + 1] != i + WhichResult))
4905 bool &DstIsLeft,
int &Anomaly) {
4906 if (M.
size() !=
static_cast<size_t>(NumInputElements))
4909 int NumLHSMatch = 0, NumRHSMatch = 0;
4910 int LastLHSMismatch = -1, LastRHSMismatch = -1;
4912 for (
int i = 0; i < NumInputElements; ++i) {
4922 LastLHSMismatch = i;
4924 if (M[i] == i + NumInputElements)
4927 LastRHSMismatch = i;
4930 if (NumLHSMatch == NumInputElements - 1) {
4932 Anomaly = LastLHSMismatch;
4934 }
else if (NumRHSMatch == NumInputElements - 1) {
4936 Anomaly = LastRHSMismatch;
4949 for (
int I = 0, E = NumElts / 2;
I != E;
I++) {
4954 int Offset = NumElts / 2;
4955 for (
int I = NumElts / 2, E = NumElts;
I != E;
I++) {
4956 if (Mask[
I] !=
I + SplitLHS * Offset)
4968 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
4997 unsigned OpNum = (PFEntry >> 26) & 0x0F;
4998 unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1);
4999 unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1);
5019 if (OpNum == OP_COPY) {
5020 if (LHSID == (1 * 9 + 2) * 9 + 3)
5022 assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 &&
"Illegal OP_COPY!");
5056 else if (EltTy ==
MVT::i32 || EltTy == MVT::f32)
5066 return DAG.
getNode(Opcode, dl, VT, OpLHS, Lane);
5071 unsigned Imm = (OpNum - OP_VEXT1 + 1) *
getExtFactor(OpLHS);
5107 for (
int Val : ShuffleMask) {
5108 for (
unsigned Byte = 0; Byte < BytesPerElt; ++Byte) {
5109 unsigned Offset = Byte + Val * BytesPerElt;
5115 unsigned IndexLen = 8;
5134 if (IndexLen == 8) {
5164 if (EltType ==
MVT::i32 || EltType == MVT::f32)
5211 Lane += cast<ConstantSDNode>(V1.
getOperand(1))->getZExtValue();
5230 bool ReverseEXT =
false;
5232 if (
isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
5245 unsigned WhichResult;
5246 if (
isZIPMask(ShuffleMask, VT, WhichResult)) {
5248 return DAG.
getNode(Opc, dl, V1.getValueType(), V1,
V2);
5250 if (
isUZPMask(ShuffleMask, VT, WhichResult)) {
5252 return DAG.
getNode(Opc, dl, V1.getValueType(), V1,
V2);
5254 if (
isTRNMask(ShuffleMask, VT, WhichResult)) {
5256 return DAG.
getNode(Opc, dl, V1.getValueType(), V1,
V2);
5261 return DAG.
getNode(Opc, dl, V1.getValueType(), V1, V1);
5265 return DAG.
getNode(Opc, dl, V1.getValueType(), V1, V1);
5269 return DAG.
getNode(Opc, dl, V1.getValueType(), V1, V1);
5279 if (
isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) {
5284 int SrcLane = ShuffleMask[Anomaly];
5285 if (SrcLane >= NumInputElements) {
5306 unsigned PFIndexes[4];
5307 for (
unsigned i = 0; i != 4; ++i) {
5308 if (ShuffleMask[i] < 0)
5311 PFIndexes[i] = ShuffleMask[i];
5315 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
5316 PFIndexes[2] * 9 + PFIndexes[3];
5318 unsigned Cost = (PFEntry >> 30);
5330 APInt SplatBits, SplatUndef;
5331 unsigned SplatBitSize;
5333 if (BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
5336 for (
unsigned i = 0; i < NumSplats; ++i) {
5337 CnstBits <<= SplatBitSize;
5338 UndefBits <<= SplatBitSize;
5340 UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.
getSizeInBits());
5364 CnstBits = ~CnstBits;
5368 bool SecondTry =
false;
5371 if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
5433 CnstBits = ~UndefBits;
5446 uint64_t &ConstVal) {
5455 for (
unsigned i = 1; i < NumElts; ++i)
5456 if (dyn_cast<ConstantSDNode>(Bvec->
getOperand(i)) != FirstElt)
5468 unsigned IID = cast<ConstantSDNode>(N->
getOperand(0))->getZExtValue();
5516 if (C2 > ElemSizeInBits)
5518 unsigned ElemMask = (1 << ElemSizeInBits) - 1;
5519 if ((C1 & ElemMask) != (~C2 & ElemMask))
5526 IsShiftRight ? Intrinsic::aarch64_neon_vsri : Intrinsic::aarch64_neon_vsli;
5532 DEBUG(
dbgs() <<
"aarch64-lower: transformed: \n");
5569 bool SecondTry =
false;
5572 if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
5573 CnstBits = CnstBits.zextOrTrunc(64);
5574 uint64_t CnstVal = CnstBits.getZExtValue();
5634 CnstBits = UndefBits;
5660 cast<ConstantSDNode>(Lane)->getZExtValue());
5680 bool SecondTry =
false;
5683 if (CnstBits.getHiBits(64) == CnstBits.getLoBits(64)) {
5684 CnstBits = CnstBits.zextOrTrunc(64);
5685 uint64_t CnstVal = CnstBits.getZExtValue();
5691 if (VT.
isInteger() && (CnstVal == 0 || CnstVal == ~0ULL))
5884 CnstBits = UndefBits;
5901 bool isOnlyLowElement =
true;
5902 bool usesOnlyOneValue =
true;
5903 bool usesOnlyOneConstantValue =
true;
5904 bool isConstant =
true;
5905 unsigned NumConstantLanes = 0;
5908 for (
unsigned i = 0; i < NumElts; ++i) {
5913 isOnlyLowElement =
false;
5914 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
5917 if (isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V)) {
5921 else if (ConstantValue != V)
5922 usesOnlyOneConstantValue =
false;
5927 else if (V != Value)
5928 usesOnlyOneValue =
false;
5934 if (isOnlyLowElement)
5939 if (usesOnlyOneValue) {
5954 return DAG.
getNode(Opcode, dl, VT, Value, Lane);
5961 "Unsupported floating-point vector type");
5963 for (
unsigned i = 0; i < NumElts; ++i)
5967 Val = LowerBUILD_VECTOR(Val, DAG);
5977 if (NumConstantLanes > 0 && usesOnlyOneConstantValue) {
5980 for (
unsigned i = 0; i < NumElts; ++i) {
5983 if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V)) {
6011 if (!isConstant && !usesOnlyOneValue) {
6021 unsigned SubIdx = ElemSize == 32 ? AArch64::ssub : AArch64::dsub;
6028 for (; i < NumElts; ++i) {
6042 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(
SDValue Op,
6049 if (!CI || CI->
getZExtValue() >= VT.getVectorNumElements())
6076 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(
SDValue Op,
6083 if (!CI || CI->
getZExtValue() >= VT.getVectorNumElements())
6112 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(
SDValue Op,
6156 unsigned PFIndexes[4];
6157 for (
unsigned i = 0; i != 4; ++i) {
6161 PFIndexes[i] = M[i];
6165 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
6166 PFIndexes[2] * 9 + PFIndexes[3];
6168 unsigned Cost = (PFEntry >> 30);
6176 unsigned DummyUnsigned;
6180 isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
6199 APInt SplatBits, SplatUndef;
6200 unsigned SplatBitSize;
6202 if (!BVN || !BVN->
isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
6203 HasAnyUndefs, ElementBits) ||
6204 SplatBitSize > ElementBits)
6215 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6219 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
6230 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6236 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
6239 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(
SDValue Op,
6276 : Intrinsic::aarch64_neon_ushl;
6283 return NegShiftLeft;
6294 "function only supposed to emit natural comparisons");
6300 bool IsZero = IsCnst && (CnstBits == 0);
6437 unsigned Intrinsic)
const {
6439 switch (Intrinsic) {
6440 case Intrinsic::aarch64_neon_ld2:
6441 case Intrinsic::aarch64_neon_ld3:
6442 case Intrinsic::aarch64_neon_ld4:
6443 case Intrinsic::aarch64_neon_ld1x2:
6444 case Intrinsic::aarch64_neon_ld1x3:
6445 case Intrinsic::aarch64_neon_ld1x4:
6446 case Intrinsic::aarch64_neon_ld2lane:
6447 case Intrinsic::aarch64_neon_ld3lane:
6448 case Intrinsic::aarch64_neon_ld4lane:
6449 case Intrinsic::aarch64_neon_ld2r:
6450 case Intrinsic::aarch64_neon_ld3r:
6451 case Intrinsic::aarch64_neon_ld4r: {
6454 uint64_t NumElts = DL.getTypeAllocSize(I.
getType()) / 8;
6464 case Intrinsic::aarch64_neon_st2:
6465 case Intrinsic::aarch64_neon_st3:
6466 case Intrinsic::aarch64_neon_st4:
6467 case Intrinsic::aarch64_neon_st1x2:
6468 case Intrinsic::aarch64_neon_st1x3:
6469 case Intrinsic::aarch64_neon_st1x4:
6470 case Intrinsic::aarch64_neon_st2lane:
6471 case Intrinsic::aarch64_neon_st3lane:
6472 case Intrinsic::aarch64_neon_st4lane: {
6475 unsigned NumElts = 0;
6480 NumElts += DL.getTypeAllocSize(ArgTy) / 8;
6491 case Intrinsic::aarch64_ldaxr:
6492 case Intrinsic::aarch64_ldxr: {
6504 case Intrinsic::aarch64_stlxr:
6505 case Intrinsic::aarch64_stxr: {
6517 case Intrinsic::aarch64_ldaxp:
6518 case Intrinsic::aarch64_ldxp: {
6529 case Intrinsic::aarch64_stlxp:
6530 case Intrinsic::aarch64_stxp: {
6554 return NumBits1 > NumBits2;
6561 return NumBits1 > NumBits2;
6568 if (I->
getOpcode() != Instruction::FMul)
6577 !(User->
getOpcode() == Instruction::FSub ||
6578 User->
getOpcode() == Instruction::FAdd))
6600 return NumBits1 == 32 && NumBits2 == 64;
6607 return NumBits1 == 32 && NumBits2 == 64;
6625 bool AArch64TargetLowering::isExtFreeImpl(
const Instruction *
Ext)
const {
6626 if (isa<FPExtInst>(Ext))
6633 for (
const Use &U : Ext->
uses()) {
6638 const Instruction *Instr = cast<Instruction>(U.getUser());
6642 case Instruction::Shl:
6646 case Instruction::GetElementPtr: {
6659 if (ShiftAmt == 0 || ShiftAmt > 4)
6663 case Instruction::Trunc:
6680 unsigned &RequiredAligment)
const {
6684 RequiredAligment = 0;
6686 return NumBits == 32 || NumBits == 64;
6690 unsigned &RequiredAligment)
const {
6695 RequiredAligment = 0;
6697 return NumBits == 32 || NumBits == 64;
6715 "Invalid interleave factor");
6716 assert(!Shuffles.
empty() &&
"Empty shufflevector input");
6717 assert(Shuffles.
size() == Indices.
size() &&
6718 "Unmatched number of shufflevectors and indices");
6723 unsigned VecSize = DL.getTypeAllocSizeInBits(VecTy);
6726 if (VecSize != 64 && VecSize != 128)
6737 Type *Tys[2] = {VecTy, PtrTy};
6738 static const Intrinsic::ID LoadInts[3] = {Intrinsic::aarch64_neon_ld2,
6739 Intrinsic::aarch64_neon_ld3,
6740 Intrinsic::aarch64_neon_ld4};
6747 CallInst *LdN = Builder.CreateCall(LdNFunc, Ptr,
"ldN");
6751 for (
unsigned i = 0; i < Shuffles.
size(); i++) {
6753 unsigned Index = Indices[i];
6755 Value *SubVec = Builder.CreateExtractValue(LdN, Index);
6759 SubVec = Builder.CreateIntToPtr(SubVec, SVI->
getType());
6773 for (
unsigned i = 0; i < NumElts; i++)
6796 unsigned Factor)
const {
6798 "Invalid interleave factor");
6802 "Invalid interleaved store");
6812 if (SubVecSize != 64 && SubVecSize != 128)
6823 unsigned NumOpElts =
6835 Type *Tys[2] = {SubVecTy, PtrTy};
6836 static const Intrinsic::ID StoreInts[3] = {Intrinsic::aarch64_neon_st2,
6837 Intrinsic::aarch64_neon_st3,
6838 Intrinsic::aarch64_neon_st4};
6845 for (
unsigned i = 0; i < Factor; i++)
6855 unsigned AlignCheck) {
6856 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
6857 (DstAlign == 0 || DstAlign % AlignCheck == 0));
6861 unsigned SrcAlign,
bool IsMemset,
6870 if (Subtarget->
hasFPARMv8() && !IsMemset && Size >= 16 &&
6891 if ((Immed >> 12) == 0 || ((Immed & 0xfff) == 0 && Immed >> 24 == 0))
6908 unsigned AS)
const {
6926 uint64_t NumBytes = 0;
6929 NumBytes = NumBits / 8;
6938 if (Offset >= -(1LL << 9) && Offset <= (1LL << 9) - 1)
6942 unsigned shift =
Log2_64(NumBytes);
6943 if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 &&
6945 (Offset >> shift) << shift == Offset)
6953 (AM.
Scale > 0 && (uint64_t)AM.
Scale == NumBytes))
6960 unsigned AS)
const {
6997 static const MCPhysReg ScratchRegs[] = {
6998 AArch64::X16, AArch64::X17, AArch64::LR, 0
7031 if ((int64_t)Val < 0)
7034 Val &= (1LL << 32) - 1;
7037 unsigned Shift = (63 - LZ) / 16;
7081 AArch64TargetLowering::BuildSDIVPow2(
SDNode *N,
const APInt &Divisor,
7083 std::vector<SDNode *> *Created)
const {
7087 !(Divisor.
isPowerOf2() || (-Divisor).isPowerOf2()))
7103 Created->push_back(Cmp.
getNode());
7104 Created->push_back(Add.
getNode());
7105 Created->push_back(CSel.
getNode());
7118 Created->push_back(SRA.
getNode());
7139 APInt VM1 = Value - 1;
7148 APInt VP1 = Value + 1;
7158 APInt VNP1 = -Value + 1;
7167 APInt VNM1 = -Value - 1;
7207 if (!BV->isConstant())
7212 EVT IntVT = BV->getValueType(0);
7236 if (VT != MVT::f32 && VT !=
MVT::f64)
7304 uint32_t ShiftLHS = 0;
7310 uint32_t ShiftRHS = 0;
7317 if (LHSFromHi == RHSFromHi)
7352 uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL <<
Bits) - 1);
7353 for (
int i = 1; i >= 0; --i)
7354 for (
int j = 1; j >= 0; --j) {
7360 bool FoundMatch =
true;
7365 CN0->
getZExtValue() != (BitMask & ~CN1->getZExtValue())) {
7430 uint64_t idx = cast<ConstantSDNode>(Op0->
getOperand(1))->getZExtValue();
7435 if (idx != AArch64::dsub)
7450 DEBUG(
dbgs() <<
"aarch64-lower: bitcast extract_subvector simplification\n");
7497 for (
size_t i = 0; i < Mask.size(); ++i)
7538 DEBUG(
dbgs() <<
"aarch64-lower: concat_vectors bitcast simplification\n");
7580 "unexpected vector size on extract_vector_elt!");
7706 cast<ConstantSDNode>(Op.
getOperand(2))->getZExtValue());
7715 if (!TValue || !FValue)
7719 if (!TValue->
isOne()) {
7725 return TValue->
isOne() && FValue->isNullValue();
7851 "unexpected shape for long operation");
7874 int64_t ShiftAmount;
7876 APInt SplatValue, SplatUndef;
7877 unsigned SplatBitSize;
7880 HasAnyUndefs, ElemBits) ||
7881 SplatBitSize != ElemBits)
7886 ShiftAmount = CVN->getSExtValue();
7895 case Intrinsic::aarch64_neon_sqshl:
7897 IsRightShift =
false;
7899 case Intrinsic::aarch64_neon_uqshl:
7901 IsRightShift =
false;
7903 case Intrinsic::aarch64_neon_srshl:
7905 IsRightShift =
true;
7907 case Intrinsic::aarch64_neon_urshl:
7909 IsRightShift =
true;
7911 case Intrinsic::aarch64_neon_sqshlu:
7913 IsRightShift =
false;
7917 if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(
int)ElemBits) {
7921 }
else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) {
7964 case Intrinsic::aarch64_neon_vcvtfxs2fp:
7965 case Intrinsic::aarch64_neon_vcvtfxu2fp:
7968 case Intrinsic::aarch64_neon_saddv:
7970 case Intrinsic::aarch64_neon_uaddv:
7972 case Intrinsic::aarch64_neon_sminv:
7974 case Intrinsic::aarch64_neon_uminv:
7976 case Intrinsic::aarch64_neon_smaxv:
7978 case Intrinsic::aarch64_neon_umaxv:
7980 case Intrinsic::aarch64_neon_fmax:
7983 case Intrinsic::aarch64_neon_fmin:
7986 case Intrinsic::aarch64_neon_smull:
7987 case Intrinsic::aarch64_neon_umull:
7988 case Intrinsic::aarch64_neon_pmull:
7989 case Intrinsic::aarch64_neon_sqdmull:
7991 case Intrinsic::aarch64_neon_sqshl:
7992 case Intrinsic::aarch64_neon_uqshl:
7993 case Intrinsic::aarch64_neon_sqshlu:
7994 case Intrinsic::aarch64_neon_srshl:
7995 case Intrinsic::aarch64_neon_urshl:
7997 case Intrinsic::aarch64_crc32b:
7998 case Intrinsic::aarch64_crc32cb:
8000 case Intrinsic::aarch64_crc32h:
8001 case Intrinsic::aarch64_crc32ch:
8018 if (IID == Intrinsic::aarch64_neon_sabd ||
8019 IID == Intrinsic::aarch64_neon_uabd) {
8087 assert(!(NumElements & 1) &&
"Splitting vector, but not in half!");
8125 if (NumVecElts != 4 && NumVecElts != 2)
8128 unsigned RemainInsertElts = NumVecElts - 1;
8131 while (--RemainInsertElts) {
8137 StVal = NextInsertElt;
8140 unsigned EltOffset = NumVecElts == 4 ? 4 : 8;
8141 unsigned Alignment =
std::min(OrigAlignment, EltOffset);
8153 unsigned Offset = EltOffset;
8154 while (--NumVecElts) {
8160 Offset += EltOffset;
8208 if (ReplacedSplat !=
SDValue())
8209 return ReplacedSplat;
8242 unsigned LoadIdx = IsLaneOp ? 1 : 0;
8258 if (UI.getUse().getResNo() == 1)
8271 || UI.getUse().getResNo() != Addr.
getResNo())
8286 uint32_t IncVal = CInc->getZExtValue();
8288 if (IncVal != NumBytes)
8345 UI.getUse().getResNo() != Addr.
getResNo())
8354 bool IsStore =
false;
8355 bool IsLaneOp =
false;
8356 bool IsDupOp =
false;
8357 unsigned NewOpc = 0;
8358 unsigned NumVecs = 0;
8359 unsigned IntNo = cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue();
8369 NumVecs = 2; IsStore =
true;
break;
8371 NumVecs = 3; IsStore =
true;
break;
8373 NumVecs = 4; IsStore =
true;
break;
8381 NumVecs = 2; IsStore =
true;
break;
8383 NumVecs = 3; IsStore =
true;
break;
8385 NumVecs = 4; IsStore =
true;
break;
8387 NumVecs = 2; IsDupOp =
true;
break;
8389 NumVecs = 3; IsDupOp =
true;
break;
8391 NumVecs = 4; IsDupOp =
true;
break;
8393 NumVecs = 2; IsLaneOp =
true;
break;
8395 NumVecs = 3; IsLaneOp =
true;
break;
8397 NumVecs = 4; IsLaneOp =
true;
break;
8399 NumVecs = 2; IsStore =
true; IsLaneOp =
true;
break;
8401 NumVecs = 3; IsStore =
true; IsLaneOp =
true;
break;
8403 NumVecs = 4; IsStore =
true; IsLaneOp =
true;
break;
8415 uint32_t IncVal = CInc->getZExtValue();
8417 if (IsLaneOp || IsDupOp)
8419 if (IncVal != NumBytes)
8426 if (IsLaneOp || IsStore)
8427 for (
unsigned i = 2; i < AddrOpIdx; ++i)
8434 unsigned NumResultVecs = (IsStore ? 0 : NumVecs);
8436 for (n = 0; n < NumResultVecs; ++n)
8448 std::vector<SDValue> NewResults;
8449 for (
unsigned i = 0; i < NumResultVecs; ++i) {
8450 NewResults.push_back(
SDValue(UpdN.getNode(), i));
8452 NewResults.push_back(
SDValue(UpdN.getNode(), NumResultVecs + 1));
8572 signed CompConstant) {
8576 signed MaxUInt = (1 << width);
8584 AddConstant -= (1 << (width-1));
8589 if ((AddConstant == 0) ||
8590 (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
8591 (AddConstant >= 0 && CompConstant < 0) ||
8592 (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
8597 if ((AddConstant == 0) ||
8598 (AddConstant >= 0 && CompConstant <= 0) ||
8599 (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
8604 if ((AddConstant >= 0 && CompConstant < 0) ||
8605 (AddConstant <= 0 && CompConstant >= -1 &&
8606 CompConstant < AddConstant + MaxUInt))
8611 if ((AddConstant == 0) ||
8612 (AddConstant > 0 && CompConstant <= 0) ||
8613 (AddConstant < 0 && CompConstant <= AddConstant))
8618 if ((AddConstant >= 0 && CompConstant <= 0) ||
8619 (AddConstant <= 0 && CompConstant >= 0 &&
8620 CompConstant <= AddConstant + MaxUInt))
8625 if ((AddConstant > 0 && CompConstant < 0) ||
8626 (AddConstant < 0 && CompConstant >= 0 &&
8627 CompConstant < AddConstant + MaxUInt) ||
8628 (AddConstant >= 0 && CompConstant >= 0 &&
8629 CompConstant >= AddConstant) ||
8630 (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
8650 unsigned CmpIndex) {
8651 unsigned CC = cast<ConstantSDNode>(N->
getOperand(CCIndex))->getSExtValue();
8653 unsigned CondOpcode = SubsNode->
getOpcode();
8662 unsigned MaskBits = 0;
8668 uint32_t CNV = CN->getZExtValue();
8671 else if (CNV == 65535)
8692 if (!isa<ConstantSDNode>(AddInputValue2.
getNode()) ||
8693 !isa<ConstantSDNode>(SubsInputValue.
getNode()))
8704 cast<ConstantSDNode>(AddInputValue2.
getNode())->getSExtValue(),
8705 cast<ConstantSDNode>(SubsInputValue.
getNode())->getSExtValue()))
8732 assert(isa<ConstantSDNode>(CCVal) &&
"Expected a ConstantSDNode here!");
8733 unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
8750 "Expected the value type to be the same for both operands!");
8754 if (isa<ConstantSDNode>(LHS) && cast<ConstantSDNode>(LHS)->isNullValue())
8757 if (!isa<ConstantSDNode>(RHS) || !cast<ConstantSDNode>(RHS)->isNullValue())
8802 cast<CondCodeSDNode>(N0.
getOperand(2))->
get());
8824 "Scalar-SETCC feeding SELECT has unexpected result type!");
8837 if (!ResVT.
isVector() || NumMaskElts == 0)
8897 bool IsUnordered =
false, IsOrEqual;
8996 switch (cast<ConstantSDNode>(N->
getOperand(1))->getZExtValue()) {
8997 case Intrinsic::aarch64_neon_ld2:
8998 case Intrinsic::aarch64_neon_ld3:
8999 case Intrinsic::aarch64_neon_ld4:
9000 case Intrinsic::aarch64_neon_ld1x2:
9001 case Intrinsic::aarch64_neon_ld1x3:
9002 case Intrinsic::aarch64_neon_ld1x4:
9003 case Intrinsic::aarch64_neon_ld2lane:
9004 case Intrinsic::aarch64_neon_ld3lane:
9005 case Intrinsic::aarch64_neon_ld4lane:
9006 case Intrinsic::aarch64_neon_ld2r:
9007 case Intrinsic::aarch64_neon_ld3r:
9008 case Intrinsic::aarch64_neon_ld4r:
9009 case Intrinsic::aarch64_neon_st2:
9010 case Intrinsic::aarch64_neon_st3:
9011 case Intrinsic::aarch64_neon_st4:
9012 case Intrinsic::aarch64_neon_st1x2:
9013 case Intrinsic::aarch64_neon_st1x3:
9014 case Intrinsic::aarch64_neon_st1x4:
9015 case Intrinsic::aarch64_neon_st2lane:
9016 case Intrinsic::aarch64_neon_st3lane:
9017 case Intrinsic::aarch64_neon_st4lane:
9030 bool AArch64TargetLowering::isUsedByReturnOnly(
SDNode *N,
9049 bool HasRet =
false;
9067 bool AArch64TargetLowering::mayBeEmittedAsTailCall(
CallInst *CI)
const {
9074 bool AArch64TargetLowering::getIndexedAddressParts(
SDNode *Op,
SDValue &Base,
9086 int64_t RHSC = (int64_t)RHS->getZExtValue();
9087 if (RHSC >= 256 || RHSC <= -256)
9096 bool AArch64TargetLowering::getPreIndexedAddressParts(
SDNode *N,
SDValue &Base,
9103 VT =
LD->getMemoryVT();
9104 Ptr =
LD->getBasePtr();
9106 VT =
ST->getMemoryVT();
9107 Ptr =
ST->getBasePtr();
9112 if (!getIndexedAddressParts(Ptr.
getNode(), Base, Offset, AM, IsInc, DAG))
9118 bool AArch64TargetLowering::getPostIndexedAddressParts(
9124 VT =
LD->getMemoryVT();
9125 Ptr =
LD->getBasePtr();
9127 VT =
ST->getMemoryVT();
9128 Ptr =
ST->getBasePtr();
9133 if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG))
9160 void AArch64TargetLowering::ReplaceNodeResults(
9180 bool AArch64TargetLowering::combineRepeatedFPDivisors(
unsigned NumUsers)
const {
9183 return NumUsers > 2;
9229 Type *ValTy = cast<PointerType>(Addr->
getType())->getElementType();
9237 IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
9241 Value *LoHi = Builder.
CreateCall(Ldxr, Addr,
"lohi");
9253 IsAcquire ? Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr;
9258 cast<PointerType>(Addr->
getType())->getElementType());
9262 Value *Val, Value *Addr,
9272 IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp;
9276 Value *Lo = Builder.
CreateTrunc(Val, Int64Ty,
"lo");
9283 IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr;
9293 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
static bool isAdvSIMDModImmType6(uint64_t Imm)
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
void setFrameAddressIsTaken(bool T)
std::enable_if< std::numeric_limits< T >::is_signed, bool >::type getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Value * getValueOperand()
unsigned getFunctionAlignment(const Function *F) const
getFunctionAlignment - Return the Log2 alignment of this function.
ValuesClass< DataType > LLVM_END_WITH_NULL values(const char *Arg, DataType Val, const char *Desc,...)
Helper structure to keep track of SetCC information.
static MVT getIntegerVT(unsigned BitWidth)
static bool isUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of "vector_shuffle v...
void AnalyzeCallResult(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeCallResult - Analyze the return values of a call, incorporating info about the passed values i...
void push_back(const T &Elt)
const MachineFunction * getParent() const
getParent - Return the MachineFunction containing this basic block.
A parsed version of the target data layout string in and methods for querying it. ...
static SDValue performBitcastCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
SDValue getValue(unsigned R) const
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address...
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG)
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
iterator_range< use_iterator > uses()
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand...
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
LLVMContext * getContext() const
static bool isEquivalentMaskless(unsigned CC, unsigned width, ISD::LoadExtType ExtType, signed AddConstant, signed CompConstant)
static uint8_t encodeAdvSIMDModImmType3(uint64_t Imm)
bool isOSBinFormatMachO() const
Tests whether the environment is MachO.
SDValue getCopyToReg(SDValue Chain, SDLoc dl, unsigned Reg, SDValue N)
uint64_t getZExtValue() const
Get zero extended value.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, SDLoc DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd)...
void setVarArgsGPRSize(unsigned Size)
unsigned getVarArgsFPRSize() const
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)
static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG)
void dump() const
Dump this node, for debugging.
static SDValue performNEONPostLDSTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Target-specific DAG combine function for NEON load/store intrinsics to merge base address updates...
int getVarArgsStackIndex() const
STATISTIC(NumFunctions,"Total number of functions")
size_t size() const
size - Get the string size.
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (an vector value) starting with the ...
bool isKnownNeverNaN(SDValue Op) const
Test whether the given SDValue is known to never be NaN.
BR_CC - Conditional branch.
LocInfo getLocInfo() const
static bool isUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
static bool isTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of "vector_shuffle v...
static MVT getVectorVT(MVT VT, unsigned NumElements)
bool hasOneUse() const
Return true if there is exactly one use of this node.
static const fltSemantics IEEEdouble
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUSES uses of the indicated value.
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
A Module instance is used to store all the information related to an LLVM module. ...
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
static SDValue tryCombineToEXTR(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
EXTR instruction extracts a contiguous chunk of bits from two existing registers viewed as a high/low...
static bool isAdvSIMDModImmType12(uint64_t Imm)
const TargetMachine & getTargetMachine() const
bool isAtLeastAcquire(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as acquire (i.e.
static bool isAdvSIMDModImmType4(uint64_t Imm)
static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
static uint8_t encodeAdvSIMDModImmType1(uint64_t Imm)
bool isCalledByLegalizer() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, SDLoc dl)
Create a MERGE_VALUES node from the given operands.
int getSplatIndex() const
Carry-setting nodes for multiple precision addition and subtraction.
const TargetMachine & getTarget() const
static bool isINSMask(ArrayRef< int > M, int NumInputElements, bool &DstIsLeft, int &Anomaly)
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT) const override
Return the preferred vector type legalization action.
unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
static CondCode getInvertedCondCode(CondCode Code)
void setVarArgsStackIndex(int Index)
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
A Stackmap instruction captures the location of live variables at its position in the instruction str...
static bool isZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of "vector_shuffle v...
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain...
CallInst - This class represents a function call, abstracting a target machine's calling convention...
void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in the KnownZero/KnownO...
const GlobalValue * getGlobal() const
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit...
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change...
static MVT getFloatingPointVT(unsigned BitWidth)
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
Return the cost of the scaling factor used in the addressing mode represented by AM for this target...
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
void addLiveIn(unsigned Reg)
Adds the specified register as a live in.
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
Type * getTypeForEVT(LLVMContext &Context) const
getTypeForEVT - This method returns an LLVM type corresponding to the specified EVT.
unsigned getSizeInBits() const
ShuffleVectorInst - This instruction constructs a fixed permutation of two input vectors.
static SDValue performSelectCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with the compare-mask instruct...
SDValue getLoad(EVT VT, SDLoc dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands...
void setVarArgsFPRIndex(int Index)
unsigned getByValSize() const
static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N, SelectionDAG &DAG)
unsigned getNumOperands() const
Return the number of values used by this operation.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
const Function * getParent() const
Return the enclosing method, or null if none.
unsigned getNumOperands() const
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG)
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB)
transferSuccessorsAndUpdatePHIs - Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor blocks which refer to fromMBB to refer to this.
const SDValue & getOperand(unsigned Num) const
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address...
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
static SDValue performExtendCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static SDValue NormalizeBuildVector(SDValue Op, SelectionDAG &DAG)
LoadInst - an instruction for reading from memory.
static unsigned getDUPLANEOp(EVT EltType)
static MachinePointerInfo getConstantPool()
getConstantPool - Return a MachinePointerInfo record that refers to the constant pool.
static IntegerType * getInt64Ty(LLVMContext &C)
static bool isAdvSIMDModImmType3(uint64_t Imm)
[US]{MIN/MAX} - Binary minimum or maximum or signed or unsigned integers.
AtomicRMWInst - an instruction that atomically reads a memory location, combines it with another valu...
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned char TargetFlags=0)
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG)
bool CheckReturn(const SmallVectorImpl< ISD::OutputArg > &ArgsFlags, CCAssignFn Fn)
CheckReturn - Analyze the return values of a function, returning true if the return can be performed ...
MachineBasicBlock * EmitF128CSEL(MachineInstr *MI, MachineBasicBlock *BB) const
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
static SDValue tryCombineFixedPointConvert(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
static std::error_code getOffset(const SymbolRef &Sym, SectionRef Sec, uint64_t &Result)
const SDValue & getBasePtr() const
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a ldN intrinsic.
INSERT_SUBVECTOR(VECTOR1, VECTOR2, IDX) - Returns a vector with VECTOR2 inserted into VECTOR1 at the ...
APInt LLVM_ATTRIBUTE_UNUSED_RESULT zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
std::size_t countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1...
void setVarArgsFPRSize(unsigned Size)
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
static std::pair< SDValue, SDValue > getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG)
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
unsigned getResNo() const
get the index which selects a specific result in the SDNode
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
bool bitsLT(EVT VT) const
bitsLT - Return true if this has less bits than VT.
bool isUnsignedIntSetCC(CondCode Code)
isUnsignedIntSetCC - Return true if this is a setcc instruction that performs an unsigned comparison ...
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG)
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(const char *reason, bool gen_crash_diag=true)
Reports a serious error, calling any installed error handler.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
SDValue getExternalSymbol(const char *Sym, EVT VT)
bool isAllOnesValue() const
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
uint64_t getTypeAllocSizeInBits(Type *Ty) const
Returns the offset in bits between successive objects of the specified type, including alignment padd...
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool isOSWindows() const
Tests whether the OS is Windows.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
SDValue getMemcpy(SDValue Chain, SDLoc dl, SDValue Dst, SDValue Src, SDValue Size, unsigned Align, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo)
static MachinePointerInfo getFixedStack(int FI, int64_t offset=0)
getFixedStack - Return a MachinePointerInfo record that refers to the the specified FrameIndex...
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
const Triple & getTargetTriple() const
MO_G2 - A symbol operand with this flag (granule 2) represents the bits 32-47 of a 64-bit address...
bool isKnownNeverZero(SDValue Op) const
Test whether the given SDValue is known to never be positive or negative Zero.
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
AtomicRMWExpansionKind
Enum that specifies what a AtomicRMWInst is expanded to, if at all.
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations...
bool isVector() const
isVector - Return true if this is a vector value type.
static bool isAdvSIMDModImmType7(uint64_t Imm)
static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG)
BlockAddress - The address of a basic block.
SDValue getStore(SDValue Chain, SDLoc dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, bool isVolatile, bool isNonTemporal, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
static bool isTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
cl::opt< bool > EnableAArch64ELFLocalDynamicTLSGeneration("aarch64-elf-ldtls-generation", cl::Hidden, cl::desc("Allow AArch64 Local Dynamic TLS code generation"), cl::init(false))
static SDValue performCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, unsigned CCIndex, unsigned CmpIndex)
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
int64_t getOffset() const
const HexagonInstrInfo * TII
static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG)
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
LegalizeTypeAction
This enum indicates whether a types are legal for a target, and if not, what action should be used to...
Shift and rotation operations.
static SDValue performAddSubLongCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
StructType - Class to represent struct types.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
A Use represents the edge between a Value definition and its users.
bool hasInternalLinkage() const
MachineFunction & getMachineFunction() const
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, const SDValue *Ops, unsigned NumOps, bool isSigned, SDLoc dl, bool doesNotReturn=false, bool isReturnValueUsed=true) const
Returns a pair of (return value, chain).
static void advance(T &it, size_t Val)
static uint8_t encodeAdvSIMDModImmType6(uint64_t Imm)
unsigned getNumArgOperands() const
getNumArgOperands - Return the number of call arguments.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
SDValue getTargetGlobalAddress(const GlobalValue *GV, SDLoc DL, EVT VT, int64_t offset=0, unsigned char TargetFlags=0)
CopyToReg - This node has three operands: a chain, a register number to set to this value...
static Constant * get(ArrayRef< Constant * > V)
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
Helper structure to keep track of a SET_CC lowered into AArch64 code.
Reg
All possible values of the reg field in the ModR/M byte.
static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS, AArch64CC::CondCode CC, bool NoNans, EVT VT, SDLoc dl, SelectionDAG &DAG)
bool isSized(SmallPtrSetImpl< const Type * > *Visited=nullptr) const
isSized - Return true if it makes sense to take the size of this type.
MVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
EVT getScalarType() const
getScalarType - If this is a vector type, return the element type, otherwise return this...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
Number of individual test Apply this number of consecutive mutations to each input exit after the first new interesting input is found the minimized corpus is saved into the first input directory Number of jobs to run If min(jobs, NumberOfCpuCores()/2)\" is used.") FUZZER_FLAG_INT(reload
static bool isLogicalImmediate(uint64_t imm, unsigned regSize)
isLogicalImmediate - Return true if the immediate is valid for a logical immediate instruction of the...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N, SelectionDAG &DAG)
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
This class is used to represent EVT's, which are used to parameterize some operations.
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG...
Type * getVectorElementType() const
static bool isAdvSIMDModImmType5(uint64_t Imm)
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
MO_CONSTPOOL - This flag indicates that a symbol operand represents the address of a constant pool en...
bool isInteger() const
isInteger - Return true if this is an integer, or a vector integer type.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
STACKSAVE - STACKSAVE has one operand, an input chain.
EVT getVectorElementType() const
getVectorElementType - Given a vector type, return the type of each element.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector...
static const unsigned PerfectShuffleTable[6561+1]
bool isInConsecutiveRegs() const
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op, SDLoc DL)
Return a new CALLSEQ_START node, which always must have a glue result (to ensure it's not CSE'd)...
unsigned getLocReg() const
void setArgumentStackToRestore(unsigned bytes)
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
static SDValue performConcatVectorsCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose...
static bool isEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseEXT, unsigned &Imm)
LLVMContext & getContext() const
getContext - Return the LLVMContext in which this type was uniqued.
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
static bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType)
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
bool LLVM_ATTRIBUTE_UNUSED_RESULT empty() const
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
SDValue getRegisterMask(const uint32_t *RegMask)
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace=0, unsigned Align=1, bool *Fast=nullptr) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
const AArch64RegisterInfo * getRegisterInfo() const override
bool hasStructRetAttr() const
Determine if the function returns a structure through first pointer argument.
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
Simple integer binary arithmetic operators.
SDValue getVectorShuffle(EVT VT, SDLoc dl, SDValue N1, SDValue N2, const int *MaskElts)
Return an ISD::VECTOR_SHUFFLE node.
bool isMask_64(uint64_t Value)
isMask_64 - This function returns true if the argument is a non-empty sequence of ones starting at th...
static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDValue &AArch64cc, SelectionDAG &DAG, SDLoc dl)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
bool isFloatingPointTy() const
isFloatingPointTy - Return true if this is one of the six floating point types
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
const SDValue & getBasePtr() const
static bool isAdvSIMDModImmType2(uint64_t Imm)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
MachineBasicBlock * emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that...
StoreInst - an instruction for storing to memory.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out...
const APInt & getAPIntValue() const
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
bool isArrayTy() const
isArrayTy - True if this is an instance of ArrayType.
TargetConstant* - Like Constant*, but the DAG does not do any folding, simplification, or lowering of the constant.
EVT getMemoryVT() const
Return the type of the in-memory value.
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override
Returns true if the target can instruction select the specified FP immediate natively.
TargetLoweringBase::AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all...
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static const MCPhysReg GPRArgRegs[]
void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero, APInt &KnownOne, const SelectionDAG &DAG, unsigned Depth=0) const override
computeKnownBitsForTargetNode - Determine which of the bits specified in Mask are known to be either ...
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
Type * getElementType() const
size_t size() const
size - Get the array size.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
const DataLayout & getDataLayout() const
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG)
WidenVector - Given a value in the V64 register class, produce the equivalent value in the V128 regis...
bool hasPairedLoad(Type *LoadedType, unsigned &RequiredAligment) const override
Return true if the target supplies and combines to a paired load two loaded values of type LoadedType...
static bool isREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isREVMask - Check if a vector shuffle corresponds to a REV instruction with the specified blocksize...
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG)
PointerType - Class to represent pointers.
const BasicBlock * getBasicBlock() const
getBasicBlock - Return the LLVM basic block that this instance corresponded to originally.
UNDEF - An undefined node.
static bool isEssentiallyExtractSubvector(SDValue N)
This class is used to represent ISD::STORE nodes.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
static SDValue performBRCONDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
Flag
These should be considered private to the implementation of the MCInstrDesc class.
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the specified, possibly variable...
TargetInstrInfo - Interface to description of machine instruction set.
LLVM_CONSTEXPR size_t array_lengthof(T(&)[N])
Find the length of an array.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
bool isLegalICmpImmediate(int64_t) const override
Return true if the specified immediate is legal icmp immediate, that is the target has icmp instructi...
static bool isAdvSIMDModImmType9(uint64_t Imm)
SDNode * getNode() const
get the SDNode which holds the desired result
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
Return true if it's free to truncate a value of type Ty1 to type Ty2.
bundle_iterator< MachineInstr, instr_iterator > iterator
A self-contained host- and target-independent arbitrary-precision floating-point software implementat...
static SDValue tryCombineToBSL(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
unsigned getScalarSizeInBits() const
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
bool hasLoadLinkedStoreConditional() const override
True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional and expand AtomicCmpXchgInst...
A switch()-like statement whose cases are string literals.
Type * getParamType(unsigned i) const
Parameter type accessors.
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set, or Regs.size() if they are all allocated.
initializer< Ty > init(const Ty &Val)
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
Patchable call instruction - this instruction represents a call to a constant address, followed by a series of NOPs.
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
Control flow instructions. These all have token chains.
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address...
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1...
static int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
unsigned getVectorNumElements() const
bool isZExtFree(Type *Ty1, Type *Ty2) const override
Return true if any actual instruction that defines a value of type Ty1 implicitly zero-extends the va...
CodeModel::Model getCodeModel() const
Returns the code model.
MVT - Machine Value Type.
LLVM Basic Block Representation.
const SDValue & getOperand(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed...
This is an important class for using LLVM in a threaded context.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type...
Simple binary floating point operators.
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
bool isNonTemporal() const
bool isVectorTy() const
isVectorTy - True if this is an instance of VectorType.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
This is an important base class in LLVM.
void incNumLocalDynamicTLSAccesses()
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE...
bool isVector() const
isVector - Return true if this is a vector value type.
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
int64_t getSExtValue() const
Get sign extended value.
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL...
const Constant * getConstVal() const
const MachineOperand & getOperand(unsigned i) const
bool isBeforeLegalizeOps() const
Carry-using nodes for multiple precision addition and subtraction.
bool isFloatTy() const
isFloatTy - Return true if this is 'float', a 32-bit IEEE fp type.
static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG)
INSERT_SUBREG - This instruction takes three operands: a register that has subregisters, a register providing an insert value, and a subregister index.
bool isTargetMachO() const
bool isLittleEndian() const
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
static bool isSetCCOrZExtSetCC(const SDValue &Op, SetCCInfoAndKind &Info)
static EVT getExtensionTo64Bits(const EVT &OrigVT)
static bool isAdvSIMDModImmType1(uint64_t Imm)
static mvt_range fp_valuetypes()
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool isDesirableToCommuteWithShift(const SDNode *N) const override
Returns false if N is a bit extraction pattern of (X >> C) & Mask.
This class provides iterator support for SDUse operands that use a specific SDNode.
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself...
SDValue getCopyFromReg(SDValue Chain, SDLoc dl, unsigned Reg, EVT VT)
SDValue getSExtOrTrunc(SDValue Op, SDLoc DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
unsigned getBytesInStackArgArea() const
bool isBeforeLegalize() const
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang","erlang-compatible garbage collector")
SDValue getTargetConstant(uint64_t Val, SDLoc DL, EVT VT, bool isOpaque=false)
unsigned getBitWidth() const
Return the number of bits in the APInt.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
opStatus convert(const fltSemantics &, roundingMode, bool *)
APFloat::convert - convert a value of one floating point type to another.
unsigned getOpcode() const
TRAP - Trapping instruction.
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline...
Value * getOperand(unsigned i) const
Value * getPointerOperand()
static bool isAdvSIMDModImmType8(uint64_t Imm)
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
static mvt_range vector_valuetypes()
int getVarArgsGPRIndex() const
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
void setVarArgsGPRIndex(int Index)
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool Immutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
static void changeFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2)
changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
bool empty() const
empty - Check if the array is empty.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
const SDValue & getValue() const
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
unsigned MaxStoresPerMemmove
Specify maximum bytes of store instructions per memmove call.
static SDValue performSelectCCCombine(SDNode *N, SelectionDAG &DAG)
performSelectCCCombine - Target-specific DAG combining for ISD::SELECT_CC to match FMIN/FMAX patterns...
SDValue getExtLoad(ISD::LoadExtType ExtType, SDLoc dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, bool isVolatile, bool isNonTemporal, bool isInvariant, unsigned Alignment, const AAMDNodes &AAInfo=AAMDNodes())
Bit counting operators with an undefined result for zero inputs.
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
BuildMI - Builder interface.
Helper structure to be able to read SetCC information.
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
MO_HI12 - This flag indicates that a symbol operand represents the bits 13-24 of a 64-bit address...
EVT - Extended Value Type.
bool isIntN(unsigned N, int64_t x)
isIntN - Checks if a signed integer fits into the given (dynamic) bit width.
bool isPointerTy() const
isPointerTy - True if this is an instance of PointerType.
std::vector< ArgListEntry > ArgListTy
static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
const APFloat & getValueAPF() const
unsigned getNextStackOffset() const
static bool isSingletonEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
CallInst * CreateCall(Value *Callee, ArrayRef< Value * > Args=None, const Twine &Name="")
This structure contains all information that is necessary for lowering calls.
PointerType * getPointerTo(unsigned AddrSpace=0)
getPointerTo - Return a pointer to the current type.
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
static SDValue GenerateTBL(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
static cl::opt< bool > EnableAArch64ExtrGeneration("aarch64-extr-generation", cl::Hidden, cl::desc("Allow AArch64 (or (shift)(shift))->extract"), cl::init(true))
const uint32_t * getTLSCallPreservedMask() const
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements)
getVectorVT - Returns the EVT that represents a vector NumElements in length, where each element is o...
MachinePointerInfo - This class contains a discriminated union of information about pointers in memor...
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
unsigned char ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const
ClassifyGlobalReference - Find the target operand flags that describe how a global value should be re...
static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG)
static uint8_t encodeAdvSIMDModImmType12(uint64_t Imm)
static SDValue performSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, SelectionDAG &DAG, const AArch64Subtarget *Subtarget)
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG)
NarrowVector - Given a value in the V128 register class, produce the equivalent value in the V64 regi...
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
Triple - Helper class for working with autoconf configuration names.
const MachinePointerInfo & getPointerInfo() const
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
unsigned getByValAlign() const
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, bool &FromHi)
An EXTR instruction is made up of two shifts, ORed together.
static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
void setHasExtractBitsInsn(bool hasExtractInsn=true)
Tells the code generator that the target has BitExtract instructions.
static cl::opt< bool > EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden, cl::desc("Allow AArch64 SLI/SRI formation"), cl::init(false))
static uint8_t encodeAdvSIMDModImmType2(uint64_t Imm)
ArrayRef< int > getMask() const
unsigned countTrailingZeros() const
Count the number of trailing zero bits.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space...
SDValue getTargetConstantPool(const Constant *C, EVT VT, unsigned Align=0, int Offset=0, unsigned char TargetFlags=0)
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned char TargetFlags=0)
bool hasExternalWeakLinkage() const
TokenFactor - This node takes multiple tokens as input and produces a single token result...
static int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG)
EXTRACT_SUBREG - This instruction takes two operands: a register that has subregisters, and a subregister index.
SDValue getNOT(SDLoc DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
CCState - This class holds information needed while lowering arguments and return values...
bool MaskAndBranchFoldingIsLegal
MaskAndBranchFoldingIsLegal - Indicates if the target supports folding a mask of a single bit...
static unsigned getIntrinsicID(const SDNode *N)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="")
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc, MachineFunction &MF) const override
Returns the target specific optimal type for load and store operations as a result of memset...
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
uint64_t getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getVectorNumElements() const
void setExceptionPointerRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception address on entry to...
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
StructType::get - This static method is the primary way to create a literal StructType.
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override
EVT is not used in-tree, but is used by out-of-tree target.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
SDValue getTargetInsertSubreg(int SRIdx, SDLoc DL, EVT VT, SDValue Operand, SDValue Subreg)
A convenience function for creating TargetInstrInfo::INSERT_SUBREG nodes.
AArch64TargetLowering(const TargetMachine &TM, const AArch64Subtarget &STI)
unsigned logBase2() const
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
bool isPredecessorOf(const SDNode *N) const
Return true if this node is a predecessor of N.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Type * getType() const
All values are typed, get the type of this value.
Instruction * user_back()
user_back - Specialize the methods defined in Value, as we know that an instruction can only be used ...
static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign, unsigned AlignCheck)
Provides information about what library functions are available for the current target.
static bool isLegalArithImmed(uint64_t C)
CCValAssign - Represent assignment of one arg/retval to a location.
static unsigned getExtFactor(SDValue &V)
getExtFactor - Determine the adjustment factor for the position when generating an "extract from vect...
BRCOND - Conditional branch.
An SDNode that represents everything that will be needed to construct a MachineInstr.
static bool isAllConstantBuildVector(const SDValue &PotentialBVec, uint64_t &ConstVal)
const SDValue & getChain() const
Byte Swap and Counting operators.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
BasicBlock * GetInsertBlock() const
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, SDLoc dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
static Constant * get(Type *Ty, uint64_t V, bool isSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
Helper structure to keep track of ISD::SET_CC operands.
MachineFrameInfo * getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool is64BitVector() const
is64BitVector - Return true if this is a 64-bit vector type.
Represents one node in the SelectionDAG.
bool isProfitableToHoist(Instruction *I) const override
Check if it is profitable to hoist instruction in then/else to if.
CondCode getSetCCInverse(CondCode Operation, bool isInteger)
getSetCCInverse - Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operat...
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
void setAdjustsStack(bool V)
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
static MachinePointerInfo getStack(int64_t Offset)
getStack - stack pointer relative access.
static cl::opt< AlignMode > Align(cl::desc("Load/store alignment support"), cl::Hidden, cl::init(NoStrictAlign), cl::values(clEnumValN(StrictAlign,"aarch64-strict-align","Disallow all unaligned memory accesses"), clEnumValN(NoStrictAlign,"aarch64-no-strict-align","Allow unaligned memory accesses"), clEnumValEnd))
static bool selectCCOpsAreFMaxCompatible(SDValue Cmp, SDValue Result)
A SELECT_CC operation is really some kind of max or min if both values being compared are...
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a stN intrinsic.
R Default(const T &Value) const
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static mvt_range integer_valuetypes()
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static bool isZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
Value * CreateZExtOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
bool is128BitVector() const
is128BitVector - Return true if this is a 128-bit vector type.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const
Selects the correct CCAssignFn for a given CallingConvention value.
Value * getArgOperand(unsigned i) const
getArgOperand/setArgOperand - Return/set the i-th call argument.
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
VectorType - Class to represent vector types.
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
Class for arbitrary precision integers.
bool isUInt< 32 >(uint64_t x)
void setExceptionSelectorRegister(unsigned R)
If set to a physical register, this sets the register that receives the exception typeid on entry to ...
iterator_range< use_iterator > uses()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
Select(COND, TRUEVAL, FALSEVAL).
void setMinFunctionAlignment(unsigned Align)
Set the target's minimum function alignment (in log2(bytes))
int64_t getSExtValue() const
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC, AArch64CC::CondCode &CondCode, AArch64CC::CondCode &CondCode2, bool &Invert)
changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC usable with the vector...
op_iterator op_begin() const
bool isIntegerTy() const
isIntegerTy - True if this is an instance of IntegerType.
static uint8_t encodeAdvSIMDModImmType10(uint64_t Imm)
static use_iterator use_end()
ZERO_EXTEND - Used for integer types, zeroing the new bits.
bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two 0 (64 bit edition...
void AnalyzeCallOperands(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeCallOperands - Analyze the outgoing arguments to a call, incorporating info about the passed v...
LLVM_ATTRIBUTE_UNUSED_RESULT std::enable_if< !is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type >::type dyn_cast(const Y &Val)
SDValue getNode(unsigned Opcode, SDLoc DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
ANY_EXTEND - Used for integer types. The high bits are undefined.
static SDValue performNVCASTCombine(SDNode *N)
Get rid of unnecessary NVCASTs (that don't change the type).
static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Value * CreateTruncOrBitCast(Value *V, Type *DestTy, const Twine &Name="")
iterator insert(iterator I, T &&Elt)
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align)
Returns the next integer (mod 2**64) that is greater than or equal to Value and is a multiple of Alig...
virtual TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT) const
Return the preferred vector type legalization action.
bool isTargetDarwin() const
static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG)
iterator_range< value_op_iterator > op_values() const
APInt And(const APInt &LHS, const APInt &RHS)
Bitwise AND function for APInt.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
static SDValue performPostLD1Combine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, bool IsLaneOp)
Target-specific DAG combine function for post-increment LD1 (lane) and post-increment LD1R...
static uint8_t encodeAdvSIMDModImmType5(uint64_t Imm)
static Constant * getSequentialMask(IRBuilder<> &Builder, unsigned Start, unsigned NumElts)
Get a mask consisting of sequential integers starting from Start.
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
BR_JT - Jumptable branch.
static mvt_range all_valuetypes()
SimpleValueType Iteration.
static MachinePointerInfo getGOT()
getGOT - Return a MachinePointerInfo record that refers to a GOT entry.
Representation of each machine instruction.
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the source.
static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St)
Replace a splat of a scalar to a vector store by scalar stores of the scalar value.
static bool isAdvSIMDModImmType10(uint64_t Imm)
static uint8_t encodeAdvSIMDModImmType8(uint64_t Imm)
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned char TargetFlags=0)
uint64_t getConstantOperandVal(unsigned i) const
bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns true if the given (atomic) load should be expanded by the IR-level AtomicExpand pass into a l...
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG)
Bitwise operators - logical and, logical or, logical xor.
pointer data()
Return a pointer to the vector's buffer, even if empty().
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
bool equals_lower(StringRef RHS) const
equals_lower - Check for string equality, ignoring case.
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
static uint8_t encodeAdvSIMDModImmType4(uint64_t Imm)
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
static uint8_t encodeAdvSIMDModImmType11(uint64_t Imm)
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
unsigned getSizeInBits() const
getSizeInBits - Return the size of the specified value type in bits.
int getVarArgsFPRIndex() const
void ReplaceAllUsesWith(SDValue From, SDValue Op)
Modify anything using 'From' to use 'To' instead.
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
ArrayRef< SDUse > ops() const
void AnalyzeReturn(const SmallVectorImpl< ISD::OutputArg > &Outs, CCAssignFn Fn)
AnalyzeReturn - Analyze the returned values of a return, incorporating info about the result values i...
FunctionType * getFunctionType() const
Fast - This calling convention attempts to make calls as fast as possible (e.g.
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
APFloat abs(APFloat X)
Returns the absolute value of the argument.
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page...
unsigned MaxStoresPerMemmoveOptSize
Maximum number of store instructions that may be substituted for a call to memmove, used for functions with OpSize attribute.
unsigned MaxStoresPerMemcpyOptSize
Maximum number of store operations that may be substituted for a call to memcpy, used for functions w...
Value * emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type...
void setStackPointerRegisterToSaveRestore(unsigned R)
If set to a physical register, this specifies the register that llvm.savestack/llvm.restorestack should save and restore.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
op_iterator op_end() const
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
unsigned getVarArgsGPRSize() const
static SDValue performIntrinsicCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const AArch64Subtarget *Subtarget)
VectorType * getType() const
getType - Overload to return most specific vector type.
MachineSDNode * getMachineNode(unsigned Opcode, SDLoc dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s), MachineInstr opcode, and operands.
static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC, SDLoc dl, SelectionDAG &DAG)
FSINCOS - Compute both fsin and fcos as a single operation.
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
unsigned MaxStoresPerMemcpy
Specify maximum bytes of store instructions per memcpy call.
bool isAtLeastRelease(AtomicOrdering Ord)
Returns true if the ordering is at least as strong as release (i.e.
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS, const AllocaInst *Alloca=nullptr)
Create a new statically sized stack object, returning a nonnegative identifier to represent it...
EVT getValueType() const
Return the ValueType of the referenced return value.
void setBytesInStackArgArea(unsigned bytes)
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none...
SDValue getConstant(uint64_t Val, SDLoc DL, EVT VT, bool isTarget=false, bool isOpaque=false)
bool is128BitVector() const
is128BitVector - Return true if this is a 128-bit vector type.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool isLegalAddImmediate(int64_t) const override
Return true if the specified immediate is legal add immediate, that is the target has add instruction...
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
SDValue getSelect(SDLoc DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
unsigned getReg() const
getReg - Returns the register number.
bool isFloatingPoint() const
isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool EnableExtLdPromotion
void insert(iterator MBBI, MachineBasicBlock *MBB)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC)
changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64 CC
static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo)
Check whether or not Op is a SET_CC operation, either a generic or an AArch64 lowered one...
void setReturnAddressIsTaken(bool s)
bool isSimple() const
isSimple - Test if the given EVT is simple (as opposed to being extended).
unsigned getAlignment() const
MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
SDValue getTargetExtractSubreg(int SRIdx, SDLoc DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
unsigned getPrimitiveSizeInBits() const LLVM_READONLY
getPrimitiveSizeInBits - Return the basic size of this type if it is a primitive type.
Module * getParent()
Get the module that this global value is contained inside of...
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="")
LLVM Value Representation.
FMA - Perform a * b + c with no intermediate rounding step.
SDValue getRegister(unsigned Reg, EVT VT)
unsigned getOpcode() const
getOpcode() returns a member of one of the enums like Instruction::Add.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
const AArch64InstrInfo * getInstrInfo() const override
static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static VectorType * get(Type *ElementType, unsigned NumElements)
VectorType::get - This static method is the primary way to construct an VectorType.
SDValue getValueType(EVT)
Disable implicit floating point insts.
Value * emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
PREFETCH - This corresponds to a prefetch intrinsic.
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
uint64_t getTypeSizeInBits(Type *Ty) const
Size examples:
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
BasicBlockListType::iterator iterator
const TargetLowering & getTargetLoweringInfo() const
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.Val alone...
Primary interface to the complete machine description for the target machine.
C - The default llvm calling convention, compatible with C.
StringRef - Represent a constant reference to a string, i.e.
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow...
SetCC operator - This evaluates to a true value iff the condition is true.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
static uint8_t encodeAdvSIMDModImmType9(uint64_t Imm)
unsigned MaxStoresPerMemsetOptSize
Maximum number of stores operations that may be substituted for the call to memset, used for functions with OptSize attribute.
static bool isConcatMask(ArrayRef< int > Mask, EVT VT, bool SplitLHS)
static bool isAdvSIMDModImmType11(uint64_t Imm)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml","ocaml 3.10-compatible collector")
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
SDValue getSetCC(SDLoc DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
bool operator==(uint64_t V1, const APInt &V2)
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
unsigned getLocMemOffset() const
MVT getVectorElementType() const
static bool isVolatile(Instruction *Inst)
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
unsigned getNumUses() const
This method computes the number of uses of this Value.
TRUNCATE - Completely drop the high bits.
bool isUIntN(unsigned N, uint64_t x)
isUIntN - Checks if an unsigned integer fits into the given (dynamic) bit width.
unsigned getAlignment() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
addReg - Add a new virtual register operand...
MVT getSimpleValueType(unsigned ResNo) const
Return the type of a specified result as a simple type.
bool isShuffleMaskLegal(const SmallVectorImpl< int > &M, EVT VT) const override
isShuffleMaskLegal - Return true if the given shuffle mask can be codegen'd directly, or if it should be stack expanded.
bool is64BitVector() const
is64BitVector - Return true if this is a 64-bit vector type.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW, FLOG, FLOG2, FLOG10, FEXP, FEXP2, FCEIL, FTRUNC, FRINT, FNEARBYINT, FROUND, FFLOOR - Perform various unary floating point operations.
static uint8_t encodeAdvSIMDModImmType7(uint64_t Imm)
Value * getPointerOperand()
void addSuccessor(MachineBasicBlock *succ, uint32_t weight=0)
addSuccessor - Add succ as a successor of this MachineBasicBlock.
unsigned Log2_64(uint64_t Value)
Log2_64 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
static bool isSplatMask(const int *Mask, EVT VT)
static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, APInt &UndefBits)
EVT changeVectorElementTypeToInteger() const
changeVectorElementTypeToInteger - Return a vector with the same number of elements as this vector...
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
MVT getSimpleVT() const
getSimpleVT - Return the SimpleValueType held in the specified simple EVT.
SDValue getIntPtrConstant(uint64_t Val, SDLoc DL, bool isTarget=false)
SDValue getMemIntrinsicNode(unsigned Opcode, SDLoc dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align=0, bool Vol=false, bool ReadMem=true, bool WriteMem=true, unsigned Size=0)
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode...
LLVMContext & getContext() const
Get the global data context.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG)
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
gep_type_iterator gep_type_begin(const User *GEP)
uint64_t getZExtValue() const
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
unsigned getVectorNumElements() const
getVectorNumElements - Given a vector type, return the number of elements it contains.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, SDLoc DL) const
SoftenSetCCOperands - Soften the operands of a comparison.
This class is used to represent ISD::LOAD nodes.
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary...
Function must be optimized for size first.